feat: updated current tests
WJPBProjects committed May 20, 2025
commit 02f9c7f9a5bcb5b20e5f1c8cf051ee2460c806e7
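
Taken together, the three files below pin down one conversion: the `prompt_tokens_details` and `completion_tokens_details` blocks on the chat-completions `CompletionUsage` must surface as `input_tokens_details.cached_tokens` and `output_tokens_details.reasoning_tokens` on the usage object the tests inspect, with any missing block defaulting to zero. A minimal sketch of that mapping, using simplified stand-in classes (`Usage`, `InputTokensDetails`, and `OutputTokensDetails` are shapes inferred from the assertions, not the SDK's real definitions, and `usage_from_chat_completion` is a hypothetical helper, not the implementation under test):

    # Sketch only: the conversion these tests appear to exercise.
    from dataclasses import dataclass, field

    from openai.types.completion_usage import CompletionUsage


    @dataclass
    class InputTokensDetails:  # assumed shape, mirroring the asserted attributes
        cached_tokens: int = 0


    @dataclass
    class OutputTokensDetails:  # assumed shape, mirroring the asserted attributes
        reasoning_tokens: int = 0


    @dataclass
    class Usage:  # assumed shape; the real class lives in the SDK under test
        requests: int = 0
        input_tokens: int = 0
        output_tokens: int = 0
        total_tokens: int = 0
        input_tokens_details: InputTokensDetails = field(default_factory=InputTokensDetails)
        output_tokens_details: OutputTokensDetails = field(default_factory=OutputTokensDetails)


    def usage_from_chat_completion(cu: CompletionUsage | None) -> Usage:
        """Map chat-completions usage onto Usage, defaulting missing details to 0."""
        if cu is None:
            return Usage()  # no usage reported: everything zero (see the zero-usage test)
        cached = cu.prompt_tokens_details.cached_tokens if cu.prompt_tokens_details else 0
        reasoning = (
            cu.completion_tokens_details.reasoning_tokens if cu.completion_tokens_details else 0
        )
        return Usage(
            requests=1,
            input_tokens=cu.prompt_tokens,
            output_tokens=cu.completion_tokens,
            total_tokens=cu.total_tokens,
            input_tokens_details=InputTokensDetails(cached_tokens=cached or 0),
            output_tokens_details=OutputTokensDetails(reasoning_tokens=reasoning or 0),
        )
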
4 changes: 3 additions & 1 deletion tests/models/test_litellm_chatcompletions_stream.py
@@ -55,7 +55,7 @@ async def test_stream_response_yields_events_for_text_content(monkeypatch) -> None:
             prompt_tokens=7,
             total_tokens=12,
             completion_tokens_details=CompletionTokensDetails(reasoning_tokens=2),
-            prompt_tokens_details=PromptTokensDetails(cached_tokens=5),
+            prompt_tokens_details=PromptTokensDetails(cached_tokens=6),
         ),
     )

@@ -122,6 +122,8 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert completed_resp.usage.input_tokens == 7
     assert completed_resp.usage.output_tokens == 5
     assert completed_resp.usage.total_tokens == 12
+    assert completed_resp.usage.input_tokens_details.cached_tokens == 6
+    assert completed_resp.usage.output_tokens_details.reasoning_tokens == 2


 @pytest.mark.allow_call_model_methods
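
Both streaming files (the LiteLLM one above and tests/test_openai_chatcompletions_stream.py below) rely on the final chunk being the one that carries `usage`; the completed response is then expected to report it, details included. A rough sketch of how such a loop could capture it, reusing the hypothetical `usage_from_chat_completion` helper and `Usage` shape sketched above (this mirrors what the assertions check, not the SDK's actual streaming code):

    from collections.abc import AsyncIterator

    from openai.types.chat import ChatCompletionChunk


    async def final_usage(stream: AsyncIterator[ChatCompletionChunk]) -> Usage:
        cu = None
        async for chunk in stream:
            if chunk.usage is not None:
                cu = chunk.usage  # only the terminal chunk sets this in these tests
        return usage_from_chat_completion(cu)
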
17 changes: 15 additions & 2 deletions tests/test_openai_chatcompletions.py
@@ -13,7 +13,10 @@
     ChatCompletionMessageToolCall,
     Function,
 )
-from openai.types.completion_usage import CompletionUsage
+from openai.types.completion_usage import (
+    CompletionUsage,
+    PromptTokensDetails,
+)
 from openai.types.responses import (
     Response,
     ResponseFunctionToolCall,
@@ -51,7 +54,13 @@ async def test_get_response_with_text_message(monkeypatch) -> None:
         model="fake",
         object="chat.completion",
         choices=[choice],
-        usage=CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12),
+        usage=CompletionUsage(
+            completion_tokens=5,
+            prompt_tokens=7,
+            total_tokens=12,
+            # completion_tokens_details left blank to test default
+            prompt_tokens_details=PromptTokensDetails(cached_tokens=3),
+        ),
     )

     async def patched_fetch_response(self, *args, **kwargs):
@@ -81,6 +90,8 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert resp.usage.input_tokens == 7
     assert resp.usage.output_tokens == 5
     assert resp.usage.total_tokens == 12
+    assert resp.usage.input_tokens_details.cached_tokens == 3
+    assert resp.usage.output_tokens_details.reasoning_tokens == 0
     assert resp.response_id is None


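The `# completion_tokens_details left blank to test default` comment in the hunk above is what the `reasoning_tokens == 0` assertion closes the loop on: an omitted details block must come through as zero rather than `None` or an attribute error. In terms of the earlier sketch (hypothetical helper, illustrative values):

    # Defaulting behaviour the assertions require:
    cu = CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12)
    u = usage_from_chat_completion(cu)
    assert u.input_tokens_details.cached_tokens == 0  # no prompt details given here
    assert u.output_tokens_details.reasoning_tokens == 0  # omitted block -> 0
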
@@ -127,6 +138,8 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert resp.usage.requests == 0
     assert resp.usage.input_tokens == 0
     assert resp.usage.output_tokens == 0
+    assert resp.usage.input_tokens_details.cached_tokens == 0
+    assert resp.usage.output_tokens_details.reasoning_tokens == 0


 @pytest.mark.allow_call_model_methods
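
The zero-usage test above goes one step further: when the response carries no usage at all, every field, including both details blocks, should read zero. With the sketched helper (again an assumption, mirroring only what the assertions demand):

    # No usage on the response at all -> a fully zeroed Usage:
    u = usage_from_chat_completion(None)
    assert u.requests == 0
    assert u.input_tokens == 0 and u.output_tokens == 0
    assert u.input_tokens_details.cached_tokens == 0
    assert u.output_tokens_details.reasoning_tokens == 0
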
16 changes: 14 additions & 2 deletions tests/test_openai_chatcompletions_stream.py
@@ -8,7 +8,11 @@
     ChoiceDeltaToolCall,
     ChoiceDeltaToolCallFunction,
 )
-from openai.types.completion_usage import CompletionUsage
+from openai.types.completion_usage import (
+    CompletionTokensDetails,
+    CompletionUsage,
+    PromptTokensDetails,
+)
 from openai.types.responses import (
     Response,
     ResponseFunctionToolCall,
@@ -46,7 +50,13 @@ async def test_stream_response_yields_events_for_text_content(monkeypatch) -> None:
         model="fake",
         object="chat.completion.chunk",
         choices=[Choice(index=0, delta=ChoiceDelta(content="llo"))],
-        usage=CompletionUsage(completion_tokens=5, prompt_tokens=7, total_tokens=12),
+        usage=CompletionUsage(
+            completion_tokens=5,
+            prompt_tokens=7,
+            total_tokens=12,
+            prompt_tokens_details=PromptTokensDetails(cached_tokens=2),
+            completion_tokens_details=CompletionTokensDetails(reasoning_tokens=3),
+        ),
     )

     async def fake_stream() -> AsyncIterator[ChatCompletionChunk]:
@@ -112,6 +122,8 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert completed_resp.usage.input_tokens == 7
     assert completed_resp.usage.output_tokens == 5
     assert completed_resp.usage.total_tokens == 12
+    assert completed_resp.usage.input_tokens_details.cached_tokens == 2
+    assert completed_resp.usage.output_tokens_details.reasoning_tokens == 3


 @pytest.mark.allow_call_model_methods