Skip to content

Commit

Permalink
Merge pull request #173 from Aleph-Alpha/PHS-622-expose-pool-size-in-constructor
Browse files Browse the repository at this point in the history

feat: Client exposes HTTPAdapter pool size
  • Loading branch information
ahartel authored Jun 28, 2024
2 parents e8067f6 + 99fd516 commit 29d67ec
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 16 deletions.
7 changes: 6 additions & 1 deletion aleph_alpha_client/aleph_alpha_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,7 @@ def __init__(
nice: bool = False,
verify_ssl=True,
tags: Optional[Sequence[str]] = None,
pool_size: int = 10,
) -> None:
if host[-1] != "/":
host += "/"
Expand All @@ -184,7 +185,11 @@ def __init__(
allowed_methods=["POST", "GET"],
raise_on_status=False,
)
adapter = HTTPAdapter(max_retries=retry_strategy)
adapter = HTTPAdapter(
max_retries=retry_strategy,
pool_connections=pool_size,
pool_maxsize=pool_size,
)
self.session = requests.Session()
self.session.verify = verify_ssl
self.session.headers = CaseInsensitiveDict(
Expand Down
8 changes: 5 additions & 3 deletions aleph_alpha_client/completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,9 +284,11 @@ def from_json(json: Dict[str, Any]) -> "CompletionResponse":
],
num_tokens_prompt_total=json["num_tokens_prompt_total"],
num_tokens_generated=json["num_tokens_generated"],
optimized_prompt=Prompt.from_json(optimized_prompt_json)
if optimized_prompt_json
else None,
optimized_prompt=(
Prompt.from_json(optimized_prompt_json)
if optimized_prompt_json
else None
),
)

def to_json(self) -> Mapping[str, Any]:
Expand Down
30 changes: 18 additions & 12 deletions aleph_alpha_client/explanation.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,9 @@ class ExplanationRequest:
control_factor: Optional[float] = None
control_token_overlap: Optional[ControlTokenOverlap] = None
control_log_additive: Optional[bool] = None
prompt_granularity: Optional[
Union[PromptGranularity, str, CustomGranularity]
] = None
prompt_granularity: Optional[Union[PromptGranularity, str, CustomGranularity]] = (
None
)
target_granularity: Optional[TargetGranularity] = None
postprocessing: Optional[ExplanationPostprocessing] = None
normalize: Optional[bool] = None
Expand Down Expand Up @@ -357,9 +357,11 @@ def from_json(item: Dict[str, Any]) -> "TextPromptItemExplanation":
def with_text(self, prompt: Text) -> "TextPromptItemExplanation":
return TextPromptItemExplanation(
scores=[
TextScoreWithRaw.from_text_score(score, prompt)
if isinstance(score, TextScore)
else score
(
TextScoreWithRaw.from_text_score(score, prompt)
if isinstance(score, TextScore)
else score
)
for score in self.scores
]
)
Expand All @@ -386,9 +388,11 @@ def from_json(item: Dict[str, Any]) -> "TargetPromptItemExplanation":
def with_text(self, prompt: str) -> "TargetPromptItemExplanation":
return TargetPromptItemExplanation(
scores=[
TargetScoreWithRaw.from_target_score(score, prompt)
if isinstance(score, TargetScore)
else score
(
TargetScoreWithRaw.from_target_score(score, prompt)
if isinstance(score, TargetScore)
else score
)
for score in self.scores
]
)
Expand Down Expand Up @@ -461,9 +465,11 @@ def with_image_prompt_items_in_pixels(self, prompt: Prompt) -> "Explanation":
return Explanation(
target=self.target,
items=[
item.in_pixels(prompt.items[item_index])
if isinstance(item, ImagePromptItemExplanation)
else item
(
item.in_pixels(prompt.items[item_index])
if isinstance(item, ImagePromptItemExplanation)
else item
)
for item_index, item in enumerate(self.items)
],
)
Expand Down

0 comments on commit 29d67ec

Please sign in to comment.