Added LiteLLM to the stack

This commit is contained in:
2025-08-18 09:40:50 +00:00
parent 0648c1968c
commit d220b04e32
2682 changed files with 533609 additions and 1 deletion

View File

@@ -0,0 +1,44 @@
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../.."))
)
from litellm.llms.azure.chat.gpt_transformation import AzureOpenAIConfig
class TestAzureOpenAIConfig:
    """Unit tests for AzureOpenAIConfig's response_format model detection."""

    def test_is_response_format_supported_model(self):
        """Prefixed/suffixed gpt-4.1 and gpt-4o deployment names are supported;
        gpt-3.5 spellings are not."""
        cfg = AzureOpenAIConfig()
        supported_names = [
            # Azure deployment names with provider prefixes and custom suffixes
            "azure/gpt-4.1-suffix",
            "gpt-4.1-suffix",
            "azure/gpt-4-1-suffix",
            "gpt-4-1-suffix",
            # 4o family should always be supported
            "gpt-4o",
            "azure/gpt-4o-custom",
            # backwards compatibility: bare base names
            "gpt-4.1",
            "gpt-4-1",
        ]
        unsupported_names = [
            "gpt-3.5-turbo",
            "gpt-3-5-turbo",
            "gpt-3-5-turbo-suffix",
            "gpt-35-turbo-suffix",
            "gpt-35-turbo",
        ]
        for name in supported_names:
            assert cfg._is_response_format_supported_model(name), name
        for name in unsupported_names:
            assert not cfg._is_response_format_supported_model(name), name
def test_map_openai_params_with_preview_api_version():
    """response_format json_object must survive mapping when api_version is 'preview'."""
    cfg = AzureOpenAIConfig()
    mapped = cfg.map_openai_params(
        {"response_format": {"type": "json_object"}},  # non_default_params
        {},  # optional_params
        "azure/gpt-4-1",  # model
        False,  # drop_params
        "preview",  # api_version
    )
    assert mapped

View File

@@ -0,0 +1,30 @@
import json
import os
import sys
import traceback
from typing import Callable, Optional
from unittest.mock import MagicMock, patch
import pytest
sys.path.insert(
0, os.path.abspath("../../../../..")
) # Adds the parent directory to the system path
import litellm
from litellm.llms.azure.chat.o_series_transformation import AzureOpenAIO1Config
@pytest.mark.asyncio
async def test_azure_chat_o_series_transformation():
    """async_transform_request should strip the 'o_series/' routing prefix
    from the model name before sending the request."""
    config = AzureOpenAIO1Config()
    request = await config.async_transform_request(
        "o_series/web-interface-o1-mini",  # model
        [{"role": "user", "content": "Hello, how are you?"}],  # messages
        {},  # optional_params
        {},  # litellm_params
        {},  # headers
    )
    print(request)
    assert request["model"] == "web-interface-o1-mini"

View File

@@ -0,0 +1,48 @@
import pytest
import litellm
from litellm.llms.azure.chat.gpt_5_transformation import AzureOpenAIGPT5Config
@pytest.fixture()
def config() -> AzureOpenAIGPT5Config:
    """Provide a fresh AzureOpenAIGPT5Config instance for each test."""
    return AzureOpenAIGPT5Config()
def test_azure_gpt5_supports_reasoning_effort(config: AzureOpenAIGPT5Config):
    """reasoning_effort is advertised for both bare and gpt5_series/ model names."""
    for model_name in ("gpt-5", "gpt5_series/my-deployment"):
        assert "reasoning_effort" in config.get_supported_openai_params(model=model_name)
def test_azure_gpt5_maps_max_tokens(config: AzureOpenAIGPT5Config):
    """max_tokens must be renamed to max_completion_tokens for GPT-5 deployments."""
    mapped = config.map_openai_params(
        non_default_params={"max_tokens": 5},
        optional_params={},
        model="gpt5_series/gpt-5",
        drop_params=False,
        api_version="2024-05-01-preview",
    )
    assert mapped.get("max_completion_tokens") == 5
    assert "max_tokens" not in mapped
def test_azure_gpt5_temperature_error(config: AzureOpenAIGPT5Config):
    """A non-default temperature must raise when drop_params is disabled."""
    request_params = {"temperature": 0.2}
    with pytest.raises(litellm.utils.UnsupportedParamsError):
        config.map_openai_params(
            non_default_params=request_params,
            optional_params={},
            model="gpt-5",
            drop_params=False,
            api_version="2024-05-01-preview",
        )
def test_azure_gpt5_series_transform_request(config: AzureOpenAIGPT5Config):
    """The gpt5_series/ routing prefix is stripped from the outgoing model name."""
    transformed = config.transform_request(
        model="gpt5_series/gpt-5",
        messages=[],
        optional_params={},
        litellm_params={},
        headers={},
    )
    assert transformed["model"] == "gpt-5"

View File

@@ -0,0 +1,31 @@
import json
import os
import sys
import traceback
from typing import Callable, Optional
from unittest.mock import MagicMock, patch
import pytest
sys.path.insert(
0, os.path.abspath("../../../../..")
) # Adds the parent directory to the system path
import litellm
from litellm.llms.azure.image_generation import (
AzureDallE3ImageGenerationConfig,
get_azure_image_generation_config,
)
@pytest.mark.parametrize(
    "received_model, expected_config",
    [
        ("dall-e-3", AzureDallE3ImageGenerationConfig),
        ("dalle-3", AzureDallE3ImageGenerationConfig),
        ("openai_dall_e_3", AzureDallE3ImageGenerationConfig),
    ],
)
def test_azure_image_generation_config(received_model, expected_config):
    """Every accepted dall-e-3 spelling resolves to the DALL-E 3 config class."""
    resolved = get_azure_image_generation_config(received_model)
    assert isinstance(resolved, expected_config)

View File

@@ -0,0 +1,227 @@
import os
import sys
from unittest.mock import patch
import pytest
sys.path.insert(
0, os.path.abspath("../../../../..")
) # Adds the parent directory to the system path
from litellm.llms.azure.responses.transformation import AzureOpenAIResponsesAPIConfig
from litellm.llms.azure.responses.o_series_transformation import AzureOpenAIOSeriesResponsesAPIConfig
from litellm.types.router import GenericLiteLLMParams
from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
@pytest.mark.serial
def test_validate_environment_api_key_within_litellm_params():
    """An api_key supplied via litellm_params must land in the api-key header."""
    config = AzureOpenAIResponsesAPIConfig()
    result = config.validate_environment(
        headers={},
        model="",
        litellm_params=GenericLiteLLMParams(api_key="test-api-key"),
    )
    assert result == {"api-key": "test-api-key"}
@pytest.mark.serial
def test_validate_environment_api_key_within_litellm():
    """A global litellm.api_key is picked up when no explicit key is given."""
    config = AzureOpenAIResponsesAPIConfig()
    with patch("litellm.api_key", "test-api-key"):
        result = config.validate_environment(
            headers={}, model="", litellm_params=GenericLiteLLMParams()
        )
    assert result == {"api-key": "test-api-key"}
@pytest.mark.serial
def test_validate_environment_azure_key_within_litellm():
    """A global litellm.azure_key is picked up when no explicit key is given."""
    config = AzureOpenAIResponsesAPIConfig()
    with patch("litellm.azure_key", "test-azure-key"):
        result = config.validate_environment(
            headers={}, model="", litellm_params=GenericLiteLLMParams()
        )
    assert result == {"api-key": "test-azure-key"}
@pytest.mark.serial
def test_validate_environment_azure_key_within_headers():
    """An api-key already present in the headers is passed through untouched."""
    config = AzureOpenAIResponsesAPIConfig()
    result = config.validate_environment(
        headers={"api-key": "test-api-key-from-headers"},
        model="",
        litellm_params=GenericLiteLLMParams(),
    )
    assert result == {"api-key": "test-api-key-from-headers"}
@pytest.mark.serial
def test_get_complete_url():
    """get_complete_url appends /openai/responses plus the api-version query string."""
    config = AzureOpenAIResponsesAPIConfig()
    url = config.get_complete_url(
        api_base="https://litellm8397336933.openai.azure.com",
        litellm_params={"api_version": "2024-05-01-preview"},
    )
    assert url == (
        "https://litellm8397336933.openai.azure.com"
        "/openai/responses?api-version=2024-05-01-preview"
    )
@pytest.mark.serial
def test_azure_o_series_responses_api_supported_params():
    """The O-series responses config must not advertise temperature, while
    the remaining core parameters stay supported."""
    supported = AzureOpenAIOSeriesResponsesAPIConfig().get_supported_openai_params(
        "o_series/gpt-o1"
    )
    assert "temperature" not in supported
    for param in ("input", "max_output_tokens", "stream", "top_p"):
        assert param in supported, param
@pytest.mark.serial
def test_azure_o_series_responses_api_drop_temperature_param():
    """For O-series models, temperature is removed only when drop_params=True;
    other parameters pass through in both cases."""
    config = AzureOpenAIOSeriesResponsesAPIConfig()
    request_params = ResponsesAPIOptionalRequestParams(
        temperature=0.7,
        max_output_tokens=1000,
        stream=False,
        top_p=0.9,
    )

    # drop_params=True: temperature removed, the rest preserved
    with_drop = config.map_openai_params(
        response_api_optional_params=request_params,
        model="o_series/gpt-o1",
        drop_params=True,
    )
    assert "temperature" not in with_drop
    assert with_drop["max_output_tokens"] == 1000
    assert with_drop["top_p"] == 0.9

    # drop_params=False: temperature kept alongside the rest
    without_drop = config.map_openai_params(
        response_api_optional_params=request_params,
        model="o_series/gpt-o1",
        drop_params=False,
    )
    assert without_drop["temperature"] == 0.7
    assert without_drop["max_output_tokens"] == 1000
    assert without_drop["top_p"] == 0.9
@pytest.mark.serial
def test_azure_o_series_responses_api_drop_params_no_temperature():
    """drop_params=True is a no-op when temperature was never supplied."""
    config = AzureOpenAIOSeriesResponsesAPIConfig()
    mapped = config.map_openai_params(
        response_api_optional_params=ResponsesAPIOptionalRequestParams(
            max_output_tokens=1000,
            stream=False,
            top_p=0.9,
        ),
        model="o_series/gpt-o1",
        drop_params=True,
    )
    assert "temperature" not in mapped
    assert mapped["max_output_tokens"] == 1000
    assert mapped["top_p"] == 0.9
@pytest.mark.serial
def test_azure_regular_responses_api_supports_temperature():
    """Non-O-series Azure responses config still advertises temperature along
    with the other core parameters."""
    supported = AzureOpenAIResponsesAPIConfig().get_supported_openai_params("gpt-4o")
    for param in ("temperature", "input", "max_output_tokens", "stream", "top_p"):
        assert param in supported, param
@pytest.mark.serial
def test_o_series_model_detection():
    """is_o_series_model keys off the explicit 'o_series/' routing prefix.

    Fix: replaced `== True` / `== False` comparisons with plain truthiness
    asserts (PEP 8 / flake8 E712) — equality against booleans is an anti-idiom
    and hides nothing here since the method returns a bool.
    """
    config = AzureOpenAIOSeriesResponsesAPIConfig()
    # Explicit o_series naming, with and without the azure/ provider prefix
    assert config.is_o_series_model("o_series/gpt-o1")
    assert config.is_o_series_model("azure/o_series/gpt-o3")
    # Regular models must not be classified as O-series
    assert not config.is_o_series_model("gpt-4o")
    assert not config.is_o_series_model("gpt-3.5-turbo")
@pytest.mark.serial
def test_provider_config_manager_o_series_selection():
    """ProviderConfigManager routes O-series model names to the O-series
    responses config, and everything else (including model=None) to the
    base Azure responses config."""
    from litellm.utils import ProviderConfigManager
    import litellm

    def _resolve(model):
        # Helper: ask the manager for the Azure responses config for *model*.
        return ProviderConfigManager.get_provider_responses_api_config(
            provider=litellm.LlmProviders.AZURE,
            model=model,
        )

    # O-series model -> O-series config
    assert isinstance(_resolve("o_series/gpt-o1"), AzureOpenAIOSeriesResponsesAPIConfig)

    # Regular model and unspecified model -> base config, never the O-series one
    for model in ("gpt-4o", None):
        resolved = _resolve(model)
        assert isinstance(resolved, AzureOpenAIResponsesAPIConfig)
        assert not isinstance(resolved, AzureOpenAIOSeriesResponsesAPIConfig)

File diff suppressed because it is too large Load Diff