Added LiteLLM to the stack
35
Development/litellm/.github/workflows/README.md
vendored
Normal file
@@ -0,0 +1,35 @@
# Simple PyPI Publishing

A GitHub workflow to manually publish LiteLLM packages to PyPI with a specified version.

## How to Use

1. Go to the **Actions** tab in the GitHub repository
2. Select **Simple PyPI Publish** from the workflow list
3. Click **Run workflow**
4. Enter the version to publish (e.g., `1.74.10`)

## What the Workflow Does

1. **Updates** the version in `pyproject.toml`
2. **Copies** the model prices backup file
3. **Builds** the Python package
4. **Publishes** to PyPI
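For a sense of what step 1 amounts to, here is a minimal Python sketch of a version update. It assumes the version lives under `[tool.commitizen]` in `pyproject.toml`, as the other workflows in this commit read it; the actual workflow may edit the file differently.

```python
# Minimal sketch of the version-update step (illustrative, not the workflow's code).
# Assumes pyproject.toml stores the version under [tool.commitizen].
import toml

def set_version(path: str, new_version: str) -> None:
    data = toml.load(path)                             # parse pyproject.toml
    data["tool"]["commitizen"]["version"] = new_version
    with open(path, "w") as f:
        toml.dump(data, f)                             # write the file back

set_version("pyproject.toml", "1.74.10")
```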
## Prerequisites

Make sure the following secret is configured in the repository:
- `PYPI_PUBLISH_PASSWORD`: PyPI API token for authentication

## Example Usage

- Version: `1.74.11` → Publishes as v1.74.11
- Version: `1.74.10-hotfix1` → Publishes as v1.74.10-hotfix1

## Features

- ✅ Manual trigger with version input
- ✅ Automatic version updates in `pyproject.toml`
- ✅ Repository safety check (only runs on official repo)
- ✅ Clean package building and publishing
- ✅ Success confirmation with PyPI package link
28
Development/litellm/.github/workflows/auto_update_price_and_context_window.yml
vendored
Normal file
@@ -0,0 +1,28 @@
name: Update model_prices_and_context_window.json and Create Pull Request

on:
  schedule:
    - cron: "0 0 * * 0" # Run every Sunday at midnight
    #- cron: "0 0 * * *" # Run daily at midnight

jobs:
  auto_update_price_and_context_window:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install Dependencies
        run: |
          pip install aiohttp
      - name: Update JSON Data
        run: |
          python ".github/workflows/auto_update_price_and_context_window_file.py"
      - name: Create Pull Request
        run: |
          git add model_prices_and_context_window.json
          git commit -m "Update model_prices_and_context_window.json file: $(date +'%Y-%m-%d')"
          gh pr create --title "Update model_prices_and_context_window.json file" \
            --body "Automated update for model_prices_and_context_window.json" \
            --head auto-update-price-and-context-window-$(date +'%Y-%m-%d') \
            --base main
        env:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}
121
Development/litellm/.github/workflows/auto_update_price_and_context_window_file.py
vendored
Normal file
@@ -0,0 +1,121 @@
import asyncio
import aiohttp
import json


# Asynchronously fetch data from a given URL
async def fetch_data(url):
    try:
        # Create an asynchronous session
        async with aiohttp.ClientSession() as session:
            # Send a GET request to the URL
            async with session.get(url) as resp:
                # Raise an error if the response status is not OK
                resp.raise_for_status()
                # Parse the response JSON
                resp_json = await resp.json()
                print("Fetched data from URL.")
                # Return the 'data' field from the JSON response
                return resp_json['data']
    except Exception as e:
        # Print an error message if fetching data fails
        print("Error fetching data from URL:", e)
        return None


# Synchronize local data with remote data
def sync_local_data_with_remote(local_data, remote_data):
    # Update existing keys in local_data with values from remote_data
    for key in (set(local_data) & set(remote_data)):
        local_data[key].update(remote_data[key])

    # Add new keys from remote_data to local_data
    for key in (set(remote_data) - set(local_data)):
        local_data[key] = remote_data[key]


# Write data to the json file
def write_to_file(file_path, data):
    try:
        # Open the file in write mode
        with open(file_path, "w") as file:
            # Dump the data as JSON into the file
            json.dump(data, file, indent=4)
            print("Values updated successfully.")
    except Exception as e:
        # Print an error message if writing to file fails
        print("Error updating JSON file:", e)


# Transform the remote rows into the local schema
def transform_remote_data(data):
    transformed = {}
    for row in data:
        # Add the fields 'max_tokens' and 'input_cost_per_token'
        obj = {
            "max_tokens": row["context_length"],
            "input_cost_per_token": float(row["pricing"]["prompt"]),
        }

        # Add 'max_output_tokens' as a field if it is not None
        if "top_provider" in row and "max_completion_tokens" in row["top_provider"] and row["top_provider"]["max_completion_tokens"] is not None:
            obj['max_output_tokens'] = int(row["top_provider"]["max_completion_tokens"])

        # Add the field 'output_cost_per_token'
        obj.update({
            "output_cost_per_token": float(row["pricing"]["completion"]),
        })

        # Add field 'input_cost_per_image' if it exists and is non-zero
        if "pricing" in row and "image" in row["pricing"] and float(row["pricing"]["image"]) != 0.0:
            obj['input_cost_per_image'] = float(row["pricing"]["image"])

        # Add the fields 'litellm_provider' and 'mode'
        obj.update({
            "litellm_provider": "openrouter",
            "mode": "chat"
        })

        # Add the 'supports_vision' field if the modality is 'multimodal'
        if row.get('architecture', {}).get('modality') == 'multimodal':
            obj['supports_vision'] = True

        # Use a composite key to store the transformed object
        transformed[f'openrouter/{row["id"]}'] = obj

    return transformed


# Load local data from a specified file
def load_local_data(file_path):
    try:
        # Open the file in read mode
        with open(file_path, "r") as file:
            # Load and return the JSON data
            return json.load(file)
    except FileNotFoundError:
        # Print an error message if the file is not found
        print("File not found:", file_path)
        return None
    except json.JSONDecodeError as e:
        # Print an error message if JSON decoding fails
        print("Error decoding JSON:", e)
        return None


def main():
    local_file_path = "model_prices_and_context_window.json"  # Path to the local data file
    url = "https://openrouter.ai/api/v1/models"  # URL to fetch remote data

    # Load local data from file
    local_data = load_local_data(local_file_path)
    # Fetch remote data asynchronously
    remote_data = asyncio.run(fetch_data(url))
    # Transform the fetched remote data
    remote_data = transform_remote_data(remote_data)

    # If both local and remote data are available, synchronize and save
    if local_data and remote_data:
        sync_local_data_with_remote(local_data, remote_data)
        write_to_file(local_file_path, local_data)
    else:
        print("Failed to fetch model data from either local file or URL.")


# Entry point of the script
if __name__ == "__main__":
    main()
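For reference, here is a hypothetical OpenRouter row and the entry `transform_remote_data` would produce for it; the field values are invented for illustration, the mapping follows directly from the function above.

```python
# Hypothetical input row (values invented for illustration)
row = {
    "id": "some-provider/some-model",
    "context_length": 128000,
    "pricing": {"prompt": "0.000001", "completion": "0.000002", "image": "0"},
    "top_provider": {"max_completion_tokens": 4096},
    "architecture": {"modality": "text"},
}
# transform_remote_data([row]) would yield:
# {
#     "openrouter/some-provider/some-model": {
#         "max_tokens": 128000,
#         "input_cost_per_token": 1e-06,
#         "max_output_tokens": 4096,
#         "output_cost_per_token": 2e-06,
#         "litellm_provider": "openrouter",
#         "mode": "chat",
#     }
# }
# (image cost is skipped because it is zero; supports_vision is skipped
#  because the modality is not 'multimodal')
```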
440
Development/litellm/.github/workflows/ghcr_deploy.yml
vendored
Normal file
@@ -0,0 +1,440 @@
# this workflow is triggered by an API call when there is a new PyPI release of LiteLLM
name: Build, Publish LiteLLM Docker Image. New Release
on:
  workflow_dispatch:
    inputs:
      tag:
        description: "The tag version you want to build"
      release_type:
        description: "The release type you want to build. Can be 'latest', 'stable', 'dev', 'rc'"
        type: string
        default: "latest"
      commit_hash:
        description: "Commit hash"
        required: true

# Defines custom environment variables for the workflow: the Container registry domain, the name of the Docker image that this workflow builds, and the Helm chart name.
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  CHART_NAME: litellm-helm

# Each job in this workflow is configured to run on the latest available version of Ubuntu.
jobs:
  # print commit hash, tag, and release type
  print:
    runs-on: ubuntu-latest
    steps:
      - run: |
          echo "Commit hash: ${{ github.event.inputs.commit_hash }}"
          echo "Tag: ${{ github.event.inputs.tag }}"
          echo "Release type: ${{ github.event.inputs.release_type }}"
  docker-hub-deploy:
    if: github.repository == 'BerriAI/litellm'
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: litellm/litellm:${{ github.event.inputs.tag || 'latest' }}
      - name: Build and push litellm-database image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./docker/Dockerfile.database
          tags: litellm/litellm-database:${{ github.event.inputs.tag || 'latest' }}
      - name: Build and push litellm-spend-logs image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./litellm-js/spend-logs/Dockerfile
          tags: litellm/litellm-spend_logs:${{ github.event.inputs.tag || 'latest' }}
      - name: Build and push litellm-non_root image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          file: ./docker/Dockerfile.non_root
          tags: litellm/litellm-non_root:${{ github.event.inputs.tag || 'latest' }}
  build-and-push-image:
    runs-on: ubuntu-latest
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      # Configure multi platform Docker builds
      - name: Set up QEMU
        uses: docker/setup-qemu-action@e0e4588fad221d38ee467c0bffd91115366dc0c5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@edfb0fe6204400c56fbfd3feba3fe9ad1adfa345
      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        uses: docker/build-push-action@4976231911ebf5f32aad765192d35f942aa48cb8
        with:
          context: .
          push: true
          tags: |
            ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
            ${{ steps.meta.outputs.tags }}-${{ github.event.inputs.release_type }}
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
            ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm:main-stable', env.REGISTRY) || '' }},
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm:{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
          labels: ${{ steps.meta.outputs.labels }}
          platforms: local,linux/amd64,linux/arm64,linux/arm64/v8

  build-and-push-image-ee:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for EE Dockerfile
        id: meta-ee
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-ee
      # Configure multi platform Docker builds
      - name: Set up QEMU
        uses: docker/setup-qemu-action@e0e4588fad221d38ee467c0bffd91115366dc0c5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@edfb0fe6204400c56fbfd3feba3fe9ad1adfa345

      - name: Build and push EE Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          file: Dockerfile
          push: true
          tags: |
            ${{ steps.meta-ee.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
            ${{ steps.meta-ee.outputs.tags }}-${{ github.event.inputs.release_type }}
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm-ee:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
            ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-ee:main-stable', env.REGISTRY) || '' }}
          labels: ${{ steps.meta-ee.outputs.labels }}
          platforms: local,linux/amd64,linux/arm64,linux/arm64/v8

  build-and-push-image-database:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for database Dockerfile
        id: meta-database
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-database
      # Configure multi platform Docker builds
      - name: Set up QEMU
        uses: docker/setup-qemu-action@e0e4588fad221d38ee467c0bffd91115366dc0c5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@edfb0fe6204400c56fbfd3feba3fe9ad1adfa345

      - name: Build and push Database Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          file: ./docker/Dockerfile.database
          push: true
          tags: |
            ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
            ${{ steps.meta-database.outputs.tags }}-${{ github.event.inputs.release_type }}
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm-database:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
            ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-database:main-stable', env.REGISTRY) || '' }}
          labels: ${{ steps.meta-database.outputs.labels }}
          platforms: local,linux/amd64,linux/arm64,linux/arm64/v8

  build-and-push-image-non_root:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for non_root Dockerfile
        id: meta-non_root
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-non_root
      # Configure multi platform Docker builds
      - name: Set up QEMU
        uses: docker/setup-qemu-action@e0e4588fad221d38ee467c0bffd91115366dc0c5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@edfb0fe6204400c56fbfd3feba3fe9ad1adfa345

      - name: Build and push non_root Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          file: ./docker/Dockerfile.non_root
          push: true
          tags: |
            ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
            ${{ steps.meta-non_root.outputs.tags }}-${{ github.event.inputs.release_type }}
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm-non_root:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
            ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-non_root:main-stable', env.REGISTRY) || '' }}
          labels: ${{ steps.meta-non_root.outputs.labels }}
          platforms: local,linux/amd64,linux/arm64,linux/arm64/v8

  build-and-push-image-spend-logs:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.commit_hash }}

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for spend-logs Dockerfile
        id: meta-spend-logs
        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-spend_logs
      # Configure multi platform Docker builds
      - name: Set up QEMU
        uses: docker/setup-qemu-action@e0e4588fad221d38ee467c0bffd91115366dc0c5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@edfb0fe6204400c56fbfd3feba3fe9ad1adfa345

      - name: Build and push Spend Logs Docker image
        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
        with:
          context: .
          file: ./litellm-js/spend-logs/Dockerfile
          push: true
          tags: |
            ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.tag || 'latest' }},
            ${{ steps.meta-spend-logs.outputs.tags }}-${{ github.event.inputs.release_type }}
            ${{ (github.event.inputs.release_type == 'stable' || github.event.inputs.release_type == 'rc') && format('{0}/berriai/litellm-spend_logs:main-{1}', env.REGISTRY, github.event.inputs.tag) || '' }},
            ${{ github.event.inputs.release_type == 'stable' && format('{0}/berriai/litellm-spend_logs:main-stable', env.REGISTRY) || '' }}
          platforms: local,linux/amd64,linux/arm64,linux/arm64/v8

  build-and-push-helm-chart:
    if: github.event.inputs.release_type != 'dev'
    needs: [docker-hub-deploy, build-and-push-image, build-and-push-image-database]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: lowercase github.repository_owner
        run: |
          echo "REPO_OWNER=`echo ${{github.repository_owner}} | tr '[:upper:]' '[:lower:]'`" >>${GITHUB_ENV}

      - name: Get LiteLLM Latest Tag
        id: current_app_tag
        shell: bash
        run: |
          LATEST_TAG=$(git describe --tags --exclude "*dev*" --abbrev=0)
          if [ -z "${LATEST_TAG}" ]; then
            echo "latest_tag=latest" | tee -a $GITHUB_OUTPUT
          else
            echo "latest_tag=${LATEST_TAG}" | tee -a $GITHUB_OUTPUT
          fi

      - name: Get last published chart version
        id: current_version
        shell: bash
        run: |
          CHART_LIST=$(helm show chart oci://${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/${{ env.CHART_NAME }} 2>/dev/null || true)
          if [ -z "${CHART_LIST}" ]; then
            echo "current-version=0.1.0" | tee -a $GITHUB_OUTPUT
          else
            printf '%s' "${CHART_LIST}" | grep '^version:' | awk 'BEGIN{FS=":"}{print "current-version="$2}' | tr -d " " | tee -a $GITHUB_OUTPUT
          fi
        env:
          HELM_EXPERIMENTAL_OCI: '1'

      # Automatically update the helm chart version one "patch" level
      - name: Bump release version
        id: bump_version
        uses: christian-draeger/increment-semantic-version@1.1.0
        with:
          current-version: ${{ steps.current_version.outputs.current-version || '0.1.0' }}
          version-fragment: 'bug'

      - uses: ./.github/actions/helm-oci-chart-releaser
        with:
          name: ${{ env.CHART_NAME }}
          repository: ${{ env.REPO_OWNER }}
          tag: ${{ github.event.inputs.chartVersion || steps.bump_version.outputs.next-version || '0.1.0' }}
          app_version: ${{ steps.current_app_tag.outputs.latest_tag }}
          path: deploy/charts/${{ env.CHART_NAME }}
          registry: ${{ env.REGISTRY }}
          registry_username: ${{ github.actor }}
          registry_password: ${{ secrets.GITHUB_TOKEN }}
          update_dependencies: true

  release:
    name: "New LiteLLM Release"
    needs: [docker-hub-deploy, build-and-push-image, build-and-push-image-database]

    runs-on: "ubuntu-latest"

    steps:
      - name: Display version
        run: echo "Current version is ${{ github.event.inputs.tag }}"
      - name: "Set Release Tag"
        run: echo "RELEASE_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
      - name: Display release tag
        run: echo "RELEASE_TAG is $RELEASE_TAG"
      - name: "Create release"
        uses: "actions/github-script@v6"
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          script: |
            const commitHash = "${{ github.event.inputs.commit_hash }}";
            console.log("Commit Hash:", commitHash); // Add this line for debugging
            try {
              const response = await github.rest.repos.createRelease({
                draft: false,
                generate_release_notes: true,
                target_commitish: commitHash,
                name: process.env.RELEASE_TAG,
                owner: context.repo.owner,
                prerelease: false,
                repo: context.repo.repo,
                tag_name: process.env.RELEASE_TAG,
              });

              core.exportVariable('RELEASE_ID', response.data.id);
              core.exportVariable('RELEASE_UPLOAD_URL', response.data.upload_url);
            } catch (error) {
              core.setFailed(error.message);
            }
      - name: Fetch Release Notes
        id: release-notes
        uses: actions/github-script@v6
        with:
          github-token: "${{ secrets.GITHUB_TOKEN }}"
          script: |
            try {
              const response = await github.rest.repos.getRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                release_id: process.env.RELEASE_ID,
              });
              const formattedBody = JSON.stringify(response.data.body).slice(1, -1);
              return formattedBody;
            } catch (error) {
              core.setFailed(error.message);
            }
        env:
          RELEASE_ID: ${{ env.RELEASE_ID }}
      - name: Github Releases To Discord
        env:
          WEBHOOK_URL: ${{ secrets.WEBHOOK_URL }}
          RELEASE_TAG: ${{ env.RELEASE_TAG }}
          RELEASE_NOTES: ${{ steps.release-notes.outputs.result }}
        run: |
          curl -H "Content-Type: application/json" -X POST -d '{
            "content": "New LiteLLM release '"${RELEASE_TAG}"'",
            "username": "Release Changelog",
            "avatar_url": "https://cdn.discordapp.com/avatars/487431320314576937/bd64361e4ba6313d561d54e78c9e7171.png",
            "embeds": [
              {
                "title": "Changelog for LiteLLM '"${RELEASE_TAG}"'",
                "description": "'"${RELEASE_NOTES}"'",
                "color": 2105893
              }
            ]
          }' $WEBHOOK_URL
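The conditional `tags:` expressions above are dense; as a reading aid, here is a rough Python rendering of the tag selection for the main image (illustrative only; the real evaluation happens in GitHub Actions expressions):

```python
# Rough Python rendering of the main image's `tags:` expressions above.
# `base` stands in for steps.meta.outputs.tags; all values are illustrative.
def main_image_tags(base: str, registry: str, tag: str, release_type: str) -> list:
    tags = [f"{base}-{tag or 'latest'}", f"{base}-{release_type}"]
    if release_type in ("stable", "rc"):
        tags.append(f"{registry}/berriai/litellm:main-{tag}")   # main-<tag>
    if release_type == "stable":
        tags.append(f"{registry}/berriai/litellm:main-stable")  # rolling stable tag
    if release_type in ("stable", "rc"):
        tags.append(f"{registry}/berriai/litellm:{tag}")        # bare <tag>
    return tags

# e.g. a 'stable' release with tag v1.74.10 would push five tags:
print(main_image_tags("ghcr.io/berriai/litellm", "ghcr.io", "v1.74.10", "stable"))
```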
67
Development/litellm/.github/workflows/ghcr_helm_deploy.yml
vendored
Normal file
@@ -0,0 +1,67 @@
# this workflow is triggered by an API call when there is a new PyPI release of LiteLLM
name: Build, Publish LiteLLM Helm Chart. New Release
on:
  workflow_dispatch:
    inputs:
      chartVersion:
        description: "Update the helm chart's version to this"

# Defines custom environment variables for the workflow: the Container registry domain, a name for the Docker image, and the repository owner.
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  REPO_OWNER: ${{github.repository_owner}}

# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
  build-and-push-helm-chart:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to the Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: lowercase github.repository_owner
        run: |
          echo "REPO_OWNER=`echo ${{github.repository_owner}} | tr '[:upper:]' '[:lower:]'`" >>${GITHUB_ENV}

      - name: Get LiteLLM Latest Tag
        id: current_app_tag
        uses: WyriHaximus/github-action-get-previous-tag@v1.3.0

      - name: Get last published chart version
        id: current_version
        shell: bash
        run: helm show chart oci://${{ env.REGISTRY }}/${{ env.REPO_OWNER }}/litellm-helm | grep '^version:' | awk 'BEGIN{FS=":"}{print "current-version="$2}' | tr -d " " | tee -a $GITHUB_OUTPUT
        env:
          HELM_EXPERIMENTAL_OCI: '1'

      # Automatically update the helm chart version one "patch" level
      - name: Bump release version
        id: bump_version
        uses: christian-draeger/increment-semantic-version@1.1.0
        with:
          current-version: ${{ steps.current_version.outputs.current-version || '0.1.0' }}
          version-fragment: 'bug'

      - name: Lint helm chart
        run: helm lint deploy/charts/litellm-helm

      - uses: ./.github/actions/helm-oci-chart-releaser
        with:
          name: litellm-helm
          repository: ${{ env.REPO_OWNER }}
          tag: ${{ github.event.inputs.chartVersion || steps.bump_version.outputs.next-version || '0.1.0' }}
          app_version: ${{ steps.current_app_tag.outputs.tag || 'latest' }}
          path: deploy/charts/litellm-helm
          registry: ${{ env.REGISTRY }}
          registry_username: ${{ github.actor }}
          registry_password: ${{ secrets.GITHUB_TOKEN }}
          update_dependencies: true
27
Development/litellm/.github/workflows/helm_unit_test.yml
vendored
Normal file
@@ -0,0 +1,27 @@
name: Helm unit test

on:
  pull_request:
  push:
    branches:
      - main

jobs:
  unit-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Helm 3.11.1
        uses: azure/setup-helm@v1
        with:
          version: '3.11.1'

      - name: Install Helm Unit Test Plugin
        run: |
          helm plugin install https://github.com/helm-unittest/helm-unittest --version v0.4.4

      - name: Run unit tests
        run:
          helm unittest -f 'tests/*.yaml' deploy/charts/litellm-helm
138
Development/litellm/.github/workflows/interpret_load_test.py
vendored
Normal file
@@ -0,0 +1,138 @@
import csv
import os
from github import Github


def interpret_results(csv_file):
    with open(csv_file, newline="") as csvfile:
        csvreader = csv.DictReader(csvfile)
        rows = list(csvreader)
        """
        in this csv reader
        - Create 1 new column "Status"
        - if a row has a median response time < 300, an average response time < 300, and a failure rate < 5%, Status = "Passed ✅"
        - otherwise, Status = "Failed ❌"
        - Order the table in this order Name, Status, Median Response Time, Average Response Time, Requests/s, Failures/s, Min Response Time, Max Response Time, all other columns
        """

        # Add a new column "Status"
        for row in rows:
            median_response_time = float(
                row["Median Response Time"].strip().rstrip("ms")
            )
            average_response_time = float(
                row["Average Response Time"].strip().rstrip("s")
            )

            request_count = int(row["Request Count"])
            failure_count = int(row["Failure Count"])

            failure_percent = round((failure_count / request_count) * 100, 2)

            # Determine status based on conditions
            if (
                median_response_time < 300
                and average_response_time < 300
                and failure_percent < 5
            ):
                row["Status"] = "Passed ✅"
            else:
                row["Status"] = "Failed ❌"

        # Construct Markdown table header
        markdown_table = "| Name | Status | Median Response Time (ms) | Average Response Time (ms) | Requests/s | Failures/s | Request Count | Failure Count | Min Response Time (ms) | Max Response Time (ms) |"
        markdown_table += (
            "\n| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |"
        )

        # Construct Markdown table rows
        for row in rows:
            markdown_table += f"\n| {row['Name']} | {row['Status']} | {row['Median Response Time']} | {row['Average Response Time']} | {row['Requests/s']} | {row['Failures/s']} | {row['Request Count']} | {row['Failure Count']} | {row['Min Response Time']} | {row['Max Response Time']} |"
        print("markdown table: ", markdown_table)
        return markdown_table


def _get_docker_run_command_stable_release(release_version):
    return f"""
\n\n
## Docker Run LiteLLM Proxy

```
docker run \\
-e STORE_MODEL_IN_DB=True \\
-p 4000:4000 \\
ghcr.io/berriai/litellm:litellm_stable_release_branch-{release_version}
```
"""


def _get_docker_run_command(release_version):
    return f"""
\n\n
## Docker Run LiteLLM Proxy

```
docker run \\
-e STORE_MODEL_IN_DB=True \\
-p 4000:4000 \\
ghcr.io/berriai/litellm:main-{release_version}
```
"""


def get_docker_run_command(release_version):
    if "stable" in release_version:
        return _get_docker_run_command_stable_release(release_version)
    else:
        return _get_docker_run_command(release_version)


if __name__ == "__main__":
    csv_file = "load_test_stats.csv"  # Change this to the path of your CSV file
    markdown_table = interpret_results(csv_file)

    # Update release body with interpreted results
    github_token = os.getenv("GITHUB_TOKEN")
    g = Github(github_token)
    repo = g.get_repo(
        "BerriAI/litellm"
    )  # Replace with your repository's username and name
    latest_release = repo.get_latest_release()
    print("got latest release: ", latest_release)
    print(latest_release.title)
    print(latest_release.tag_name)

    release_version = latest_release.title

    print("latest release body: ", latest_release.body)
    print("markdown table: ", markdown_table)

    # check if "Load Test LiteLLM Proxy Results" exists
    existing_release_body = latest_release.body
    if "Load Test LiteLLM Proxy Results" in latest_release.body:
        # find the "Load Test LiteLLM Proxy Results" section and delete it
        start_index = latest_release.body.find("Load Test LiteLLM Proxy Results")
        existing_release_body = latest_release.body[:start_index]

    docker_run_command = get_docker_run_command(release_version)
    print("docker run command: ", docker_run_command)

    new_release_body = (
        existing_release_body
        + docker_run_command
        + "\n\n"
        + "### Don't want to maintain your internal proxy? get in touch 🎉"
        + "\nHosted Proxy Alpha: https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat"
        + "\n\n"
        + "## Load Test LiteLLM Proxy Results"
        + "\n\n"
        + markdown_table
    )
    print("new release body: ", new_release_body)
    try:
        latest_release.update_release(
            name=latest_release.tag_name,
            message=new_release_body,
        )
    except Exception as e:
        print(e)
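As a quick illustration of the pass/fail rule in `interpret_results`, consider a hypothetical row (values invented):

```python
# Hypothetical load-test row (values invented for illustration)
row = {
    "Median Response Time": "250",   # ms, < 300  -> ok
    "Average Response Time": "280",  # ms, < 300  -> ok
    "Request Count": "1000",
    "Failure Count": "20",           # 20/1000 = 2.0% < 5% -> ok
}
# All three conditions hold, so the script would set row["Status"] = "Passed ✅".
```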
64
Development/litellm/.github/workflows/issue-keyword-labeler.yml
vendored
Normal file
@@ -0,0 +1,64 @@
name: Issue Keyword Labeler

on:
  issues:
    types:
      - opened

jobs:
  scan-and-label:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      contents: read
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Scan for provider keywords
        id: scan
        env:
          PROVIDER_ISSUE_WEBHOOK_URL: ${{ secrets.PROVIDER_ISSUE_WEBHOOK_URL }}
          KEYWORDS: azure,openai,bedrock,vertexai,vertex ai,anthropic
        run: python3 .github/scripts/scan_keywords.py

      - name: Ensure label exists
        if: steps.scan.outputs.found == 'true'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const labelName = 'llm translation';
            try {
              await github.rest.issues.getLabel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                name: labelName
              });
            } catch (error) {
              if (error.status === 404) {
                await github.rest.issues.createLabel({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  name: labelName,
                  color: 'c1ff72',
                  description: 'Issues related to LLM provider translation/mapping'
                });
              } else {
                throw error;
              }
            }

      - name: Add label to the issue
        if: steps.scan.outputs.found == 'true'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            await github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              labels: ['llm translation']
            });
17
Development/litellm/.github/workflows/label-mlops.yml
vendored
Normal file
@@ -0,0 +1,17 @@
name: Label ML Ops Team Issues

on:
  issues:
    types:
      - opened

jobs:
  add-mlops-label:
    runs-on: ubuntu-latest
    steps:
      - name: Check if ML Ops Team is selected
        uses: actions-ecosystem/action-add-labels@v1
        if: contains(github.event.issue.body, '### Are you a ML Ops Team?') && contains(github.event.issue.body, 'Yes')
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          labels: "mlops user request"
89
Development/litellm/.github/workflows/llm-translation-testing.yml
vendored
Normal file
@@ -0,0 +1,89 @@
name: LLM Translation Tests

on:
  workflow_dispatch:
    inputs:
      release_candidate_tag:
        description: 'Release candidate tag/version'
        required: true
        type: string
  push:
    tags:
      - 'v*-rc*' # Triggers on release candidate tags like v1.0.0-rc1

jobs:
  run-llm-translation-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 90

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.inputs.release_candidate_tag || github.ref }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          version: latest
          virtualenvs-create: true
          virtualenvs-in-project: true

      - name: Cache Poetry dependencies
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/pypoetry
            .venv
          key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }}
          restore-keys: |
            ${{ runner.os }}-poetry-

      - name: Install dependencies
        run: |
          poetry install --with dev
          poetry run pip install pytest-xdist pytest-timeout

      - name: Create test results directory
        run: mkdir -p test-results

      - name: Run LLM Translation Tests
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
          GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
          AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
          AZURE_API_BASE: ${{ secrets.AZURE_API_BASE }}
          AZURE_API_VERSION: ${{ secrets.AZURE_API_VERSION }}
          # Add other API keys as needed
        run: |
          python .github/workflows/run_llm_translation_tests.py \
            --tag "${{ github.event.inputs.release_candidate_tag || github.ref_name }}" \
            --commit "${{ github.sha }}" \
            || true # Continue even if tests fail

      - name: Display test summary
        if: always()
        run: |
          if [ -f "test-results/llm_translation_report.md" ]; then
            echo "Test report generated successfully!"
            echo "Artifact will contain:"
            echo "- test-results/junit.xml (JUnit XML results)"
            echo "- test-results/llm_translation_report.md (Beautiful markdown report)"
          else
            echo "Warning: Test report was not generated"
          fi

      - name: Upload test artifacts
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: LLM-Translation-Artifact-${{ github.event.inputs.release_candidate_tag || github.ref_name }}
          path: test-results/
          retention-days: 30
59
Development/litellm/.github/workflows/load_test.yml
vendored
Normal file
@@ -0,0 +1,59 @@
name: Test Locust Load Test

on:
  workflow_run:
    workflows: ["Build, Publish LiteLLM Docker Image. New Release"]
    types:
      - completed
  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install PyGithub
      - name: re-deploy proxy
        run: |
          echo "Current working directory: $PWD"
          ls
          python ".github/workflows/redeploy_proxy.py"
        env:
          LOAD_TEST_REDEPLOY_URL1: ${{ secrets.LOAD_TEST_REDEPLOY_URL1 }}
          LOAD_TEST_REDEPLOY_URL2: ${{ secrets.LOAD_TEST_REDEPLOY_URL2 }}
        working-directory: ${{ github.workspace }}
      - name: Run Load Test
        id: locust_run
        uses: BerriAI/locust-github-action@master
        with:
          LOCUSTFILE: ".github/workflows/locustfile.py"
          URL: "https://post-release-load-test-proxy.onrender.com/"
          USERS: "20"
          RATE: "20"
          RUNTIME: "300s"
      - name: Process Load Test Stats
        run: |
          echo "Current working directory: $PWD"
          ls
          python ".github/workflows/interpret_load_test.py"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        working-directory: ${{ github.workspace }}
      - name: Upload CSV as Asset to Latest Release
        uses: xresloader/upload-to-github-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          file: "load_test_stats.csv;load_test.html"
          update_latest_release: true
          tag_name: "load-test"
          overwrite: true
28
Development/litellm/.github/workflows/locustfile.py
vendored
Normal file
@@ -0,0 +1,28 @@
from locust import HttpUser, task, between


class MyUser(HttpUser):
    wait_time = between(1, 5)

    @task
    def chat_completion(self):
        headers = {
            "Content-Type": "application/json",
            "Authorization": "Bearer sk-8N1tLOOyH8TIxwOLahhIVg",
            # Include any additional headers you may need for authentication, etc.
        }

        # Customize the payload with "model" and "messages" keys
        payload = {
            "model": "fake-openai-endpoint",
            "messages": [
                {"role": "system", "content": "You are a chat bot."},
                {"role": "user", "content": "Hello, how are you?"},
            ],
            # Add more data as necessary
        }

        # Make a POST request to the "chat/completions" endpoint
        response = self.client.post("chat/completions", json=payload, headers=headers)

        # Print or log the response if needed
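To exercise this locustfile outside CI, locust can be pointed at a proxy directly, e.g. `locust -f .github/workflows/locustfile.py --host <proxy-url> -u 20 -r 20 -t 300s --headless`, mirroring the 20 users / 20 spawn rate / 300s runtime that load_test.yml passes to the locust action; the flags are standard locust CLI options, worth verifying against your installed version.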
34
Development/litellm/.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,34 @@
name: Publish Dev Release to PyPI

on:
  workflow_dispatch:

jobs:
  publish-dev-release:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8 # Adjust the Python version as needed

      - name: Install dependencies
        run: pip install toml twine

      - name: Read version from pyproject.toml
        id: read-version
        run: |
          version=$(python -c 'import toml; print(toml.load("pyproject.toml")["tool"]["commitizen"]["version"])')
          printf "LITELLM_VERSION=%s" "$version" >> $GITHUB_ENV

      - name: Check if version exists on PyPI
        id: check-version
        run: |
          set -e
          if twine check --repository-url https://pypi.org/simple/ "litellm==$LITELLM_VERSION" >/dev/null 2>&1; then
            echo "Version $LITELLM_VERSION already exists on PyPI. Skipping publish."
206
Development/litellm/.github/workflows/publish-migrations.yml
vendored
Normal file
@@ -0,0 +1,206 @@
name: Publish Prisma Migrations

permissions:
  contents: write
  pull-requests: write

on:
  push:
    paths:
      - 'schema.prisma' # Check root schema.prisma
    branches:
      - main

jobs:
  publish-migrations:
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:14
        env:
          POSTGRES_DB: temp_db
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
        ports:
          - 5432:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

      # Add shadow database service
      postgres_shadow:
        image: postgres:14
        env:
          POSTGRES_DB: shadow_db
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
        ports:
          - 5433:5432
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install Dependencies
        run: |
          pip install prisma
          pip install python-dotenv

      - name: Generate Initial Migration if None Exists
        env:
          DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
        run: |
          mkdir -p deploy/migrations
          echo 'provider = "postgresql"' > deploy/migrations/migration_lock.toml

          if [ -z "$(ls -A deploy/migrations/2* 2>/dev/null)" ]; then
            echo "No existing migrations found, creating baseline..."
            VERSION=$(date +%Y%m%d%H%M%S)
            mkdir -p deploy/migrations/${VERSION}_initial

            echo "Generating initial migration..."
            # Save raw output for debugging
            prisma migrate diff \
              --from-empty \
              --to-schema-datamodel schema.prisma \
              --shadow-database-url "${SHADOW_DATABASE_URL}" \
              --script > deploy/migrations/${VERSION}_initial/raw_migration.sql

            echo "Raw migration file content:"
            cat deploy/migrations/${VERSION}_initial/raw_migration.sql

            echo "Cleaning migration file..."
            # Clean the file
            sed '/^Installing/d' deploy/migrations/${VERSION}_initial/raw_migration.sql > deploy/migrations/${VERSION}_initial/migration.sql

            # Verify the migration file
            if [ ! -s deploy/migrations/${VERSION}_initial/migration.sql ]; then
              echo "ERROR: Migration file is empty after cleaning"
              echo "Original content was:"
              cat deploy/migrations/${VERSION}_initial/raw_migration.sql
              exit 1
            fi

            echo "Final migration file content:"
            cat deploy/migrations/${VERSION}_initial/migration.sql

            # Verify it starts with SQL
            if ! head -n 1 deploy/migrations/${VERSION}_initial/migration.sql | grep -q "^--\|^CREATE\|^ALTER"; then
              echo "ERROR: Migration file does not start with SQL command or comment"
              echo "First line is:"
              head -n 1 deploy/migrations/${VERSION}_initial/migration.sql
              echo "Full content is:"
              cat deploy/migrations/${VERSION}_initial/migration.sql
              exit 1
            fi

            echo "Initial migration generated at $(date -u)" > deploy/migrations/${VERSION}_initial/README.md
          fi

      - name: Compare and Generate Migration
        if: success()
        env:
          DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
        run: |
          # Create temporary migration workspace
          mkdir -p temp_migrations

          # Copy existing migrations (will not fail if directory is empty)
          cp -r deploy/migrations/* temp_migrations/ 2>/dev/null || true

          VERSION=$(date +%Y%m%d%H%M%S)

          # Generate diff against existing migrations or empty state
          prisma migrate diff \
            --from-migrations temp_migrations \
            --to-schema-datamodel schema.prisma \
            --shadow-database-url "${SHADOW_DATABASE_URL}" \
            --script > temp_migrations/migration_${VERSION}.sql

          # Check if there are actual changes
          if [ -s temp_migrations/migration_${VERSION}.sql ]; then
            echo "Changes detected, creating new migration"
            mkdir -p deploy/migrations/${VERSION}_schema_update
            mv temp_migrations/migration_${VERSION}.sql deploy/migrations/${VERSION}_schema_update/migration.sql
            echo "Migration generated at $(date -u)" > deploy/migrations/${VERSION}_schema_update/README.md
          else
            echo "No schema changes detected"
            exit 0
          fi

      - name: Verify Migration
        if: success()
        env:
          DATABASE_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          DIRECT_URL: "postgresql://postgres:postgres@localhost:5432/temp_db"
          SHADOW_DATABASE_URL: "postgresql://postgres:postgres@localhost:5433/shadow_db"
        run: |
          # Create test database
          psql "${SHADOW_DATABASE_URL}" -c 'CREATE DATABASE migration_test;'

          # Apply all migrations in order to verify
          for migration in deploy/migrations/*/migration.sql; do
            echo "Applying migration: $migration"
            psql "${SHADOW_DATABASE_URL}" -f $migration
          done

      # Add this step before create-pull-request to debug permissions
      - name: Check Token Permissions
        run: |
          echo "Checking token permissions..."
          curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/BerriAI/litellm/collaborators

          echo "\nChecking if token can create PRs..."
          curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/BerriAI/litellm

      # Add this debug step before git push
      - name: Debug Changed Files
        run: |
          echo "Files staged for commit:"
          git diff --name-status --staged

          echo "\nAll changed files:"
          git status

      - name: Create Pull Request
        if: success()
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: "chore: update prisma migrations"
          title: "Update Prisma Migrations"
          body: |
            Auto-generated migration based on schema.prisma changes.

            Generated files:
            - deploy/migrations/${VERSION}_schema_update/migration.sql
            - deploy/migrations/${VERSION}_schema_update/README.md
          branch: feat/prisma-migration-${{ env.VERSION }}
          base: main
          delete-branch: true

      - name: Generate and Save Migrations
        run: |
          # Only add migration files
          git add deploy/migrations/
          git status # Debug what's being committed
          git commit -m "chore: update prisma migrations"
31
Development/litellm/.github/workflows/read_pyproject_version.yml
vendored
Normal file
@@ -0,0 +1,31 @@
name: Read Version from pyproject.toml

on:
  push:
    branches:
      - main # Change this to the default branch of your repository

jobs:
  read-version:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8 # Adjust the Python version as needed

      - name: Install dependencies
        run: pip install toml

      - name: Read version from pyproject.toml
        id: read-version
        run: |
          version=$(python -c 'import toml; print(toml.load("pyproject.toml")["tool"]["commitizen"]["version"])')
          printf "LITELLM_VERSION=%s" "$version" >> $GITHUB_ENV

      - name: Display version
        run: echo "Current version is $LITELLM_VERSION"
20
Development/litellm/.github/workflows/redeploy_proxy.py
vendored
Normal file
@@ -0,0 +1,20 @@
"""
redeploy_proxy.py
"""

import os
import requests
import time

# send a get request to this endpoint
deploy_hook1 = os.getenv("LOAD_TEST_REDEPLOY_URL1")
response = requests.get(deploy_hook1, timeout=20)


deploy_hook2 = os.getenv("LOAD_TEST_REDEPLOY_URL2")
response = requests.get(deploy_hook2, timeout=20)

print("SENT GET REQUESTS to re-deploy proxy")
print("sleeping... for 60s")
time.sleep(60)
39
Development/litellm/.github/workflows/reset_stable.yml
vendored
Normal file
@@ -0,0 +1,39 @@
name: Reset litellm_stable branch

on:
  release:
    types: [published, created]
jobs:
  update-stable-branch:
    if: ${{ startsWith(github.event.release.tag_name, 'v') && !endsWith(github.event.release.tag_name, '-stable') }}
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Reset litellm_stable_release_branch branch to the release commit
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Configure Git user
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

          # Fetch all branches and tags
          git fetch --all

          # Check if the litellm_stable_release_branch branch exists
          if git show-ref --verify --quiet refs/remotes/origin/litellm_stable_release_branch; then
            echo "litellm_stable_release_branch branch exists."
            git checkout litellm_stable_release_branch
          else
            echo "litellm_stable_release_branch branch does not exist. Creating it."
            git checkout -b litellm_stable_release_branch
          fi

          # Reset litellm_stable_release_branch branch to the release commit
          git reset --hard $GITHUB_SHA

          # Push the updated litellm_stable_release_branch branch
          git push origin litellm_stable_release_branch --force
27
Development/litellm/.github/workflows/results_stats.csv
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Date,"Ben
|
||||
Ashley",Tom Brooks,Jimmy Cooney,"Sue
|
||||
Daniels",Berlinda Fong,Terry Jones,Angelina Little,Linda Smith
|
||||
10/1,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,FALSE,TRUE
|
||||
10/2,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/3,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/4,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/5,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/6,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/7,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/8,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/9,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/10,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/11,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/12,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/13,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/14,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/15,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/16,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/17,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/18,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/19,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/20,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/21,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/22,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
10/23,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE
|
||||
Total,0,1,1,1,1,1,0,1
|
|
439
Development/litellm/.github/workflows/run_llm_translation_tests.py
vendored
Normal file
@@ -0,0 +1,439 @@
#!/usr/bin/env python3
"""
Run LLM Translation Tests and Generate Beautiful Markdown Report

This script runs the LLM translation tests and generates a comprehensive
markdown report with provider-specific breakdowns and test statistics.
"""

import os
import sys
import subprocess
import xml.etree.ElementTree as ET
from collections import defaultdict
from datetime import datetime
from pathlib import Path
import json
from typing import Dict, List, Tuple, Optional

# ANSI color codes for terminal output
class Colors:
    GREEN = '\033[92m'
    RED = '\033[91m'
    YELLOW = '\033[93m'
    BLUE = '\033[94m'
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    RESET = '\033[0m'
    BOLD = '\033[1m'

def print_colored(message: str, color: str = Colors.RESET):
    """Print colored message to terminal"""
    print(f"{color}{message}{Colors.RESET}")

def get_provider_from_test_file(test_file: str) -> str:
    """Map test file names to provider names"""
    provider_mapping = {
        'test_anthropic': 'Anthropic',
        'test_azure': 'Azure',
        'test_bedrock': 'AWS Bedrock',
        'test_openai': 'OpenAI',
        'test_vertex': 'Google Vertex AI',
        'test_gemini': 'Google Vertex AI',
        'test_cohere': 'Cohere',
        'test_databricks': 'Databricks',
        'test_groq': 'Groq',
        'test_together': 'Together AI',
        'test_mistral': 'Mistral',
        'test_deepseek': 'DeepSeek',
        'test_replicate': 'Replicate',
        'test_huggingface': 'HuggingFace',
        'test_fireworks': 'Fireworks AI',
        'test_perplexity': 'Perplexity',
        'test_cloudflare': 'Cloudflare',
        'test_voyage': 'Voyage AI',
        'test_xai': 'xAI',
        'test_nvidia': 'NVIDIA',
        'test_watsonx': 'IBM watsonx',
        'test_azure_ai': 'Azure AI',
        'test_snowflake': 'Snowflake',
        'test_infinity': 'Infinity',
        'test_jina': 'Jina AI',
        'test_deepgram': 'Deepgram',
        'test_clarifai': 'Clarifai',
        'test_triton': 'Triton',
    }

    for key, provider in provider_mapping.items():
        if key in test_file:
            return provider

    # For cross-provider test files
    if any(name in test_file for name in ['test_optional_params', 'test_prompt_factory',
                                          'test_router', 'test_text_completion']):
        return f'Cross-Provider Tests ({test_file})'

    return 'Other Tests'

def format_duration(seconds: float) -> str:
    """Format duration in human-readable format"""
    if seconds < 60:
        return f"{seconds:.2f}s"
    elif seconds < 3600:
        minutes = int(seconds // 60)
        secs = seconds % 60
        return f"{minutes}m {secs:.0f}s"
    else:
        hours = int(seconds // 3600)
        minutes = int((seconds % 3600) // 60)
        return f"{hours}h {minutes}m"


def generate_markdown_report(junit_xml_path: str, output_path: str, tag: str = None, commit: str = None):
    """Generate a beautiful markdown report from JUnit XML"""
    try:
        tree = ET.parse(junit_xml_path)
        root = tree.getroot()

        # Handle both testsuite and testsuites root
        if root.tag == 'testsuites':
            suites = root.findall('testsuite')
        else:
            suites = [root]

        # Overall statistics
        total_tests = 0
        total_failures = 0
        total_errors = 0
        total_skipped = 0
        total_time = 0.0

        # Provider breakdown
        provider_stats = defaultdict(lambda: {'passed': 0, 'failed': 0, 'skipped': 0, 'errors': 0, 'time': 0.0})
        provider_tests = defaultdict(list)

        for suite in suites:
            total_tests += int(suite.get('tests', 0))
            total_failures += int(suite.get('failures', 0))
            total_errors += int(suite.get('errors', 0))
            total_skipped += int(suite.get('skipped', 0))
            total_time += float(suite.get('time', 0))

            for testcase in suite.findall('testcase'):
                classname = testcase.get('classname', '')
                test_name = testcase.get('name', '')
                test_time = float(testcase.get('time', 0))

                # Extract test file name from classname
                if '.' in classname:
                    parts = classname.split('.')
                    test_file = parts[-2] if len(parts) > 1 else 'unknown'
                else:
                    test_file = 'unknown'

                provider = get_provider_from_test_file(test_file)
                provider_stats[provider]['time'] += test_time

                # Check test status
                if testcase.find('failure') is not None:
                    provider_stats[provider]['failed'] += 1
                    failure = testcase.find('failure')
                    failure_msg = failure.get('message', '') if failure is not None else ''
                    provider_tests[provider].append({
                        'name': test_name,
                        'status': 'FAILED',
                        'time': test_time,
                        'message': failure_msg
                    })
                elif testcase.find('error') is not None:
                    provider_stats[provider]['errors'] += 1
                    error = testcase.find('error')
                    error_msg = error.get('message', '') if error is not None else ''
                    provider_tests[provider].append({
                        'name': test_name,
                        'status': 'ERROR',
                        'time': test_time,
                        'message': error_msg
                    })
                elif testcase.find('skipped') is not None:
                    provider_stats[provider]['skipped'] += 1
                    skip = testcase.find('skipped')
                    skip_msg = skip.get('message', '') if skip is not None else ''
                    provider_tests[provider].append({
                        'name': test_name,
                        'status': 'SKIPPED',
                        'time': test_time,
                        'message': skip_msg
                    })
                else:
                    provider_stats[provider]['passed'] += 1
                    provider_tests[provider].append({
                        'name': test_name,
                        'status': 'PASSED',
                        'time': test_time,
                        'message': ''
                    })

        passed = total_tests - total_failures - total_errors - total_skipped

        # Generate the markdown report
        with open(output_path, 'w') as f:
            # Header
            f.write("# LLM Translation Test Results\n\n")

            # Metadata table
            f.write("## Test Run Information\n\n")
            f.write("| Field | Value |\n")
            f.write("|-------|-------|\n")
            f.write(f"| **Tag** | `{tag or 'N/A'}` |\n")
            f.write(f"| **Date** | {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')} |\n")
            f.write(f"| **Commit** | `{commit or 'N/A'}` |\n")
            f.write(f"| **Duration** | {format_duration(total_time)} |\n")
            f.write("\n")

            # Overall statistics with visual elements
            f.write("## Overall Statistics\n\n")

            # Summary box
            f.write("```\n")
            f.write(f"Total Tests: {total_tests}\n")
            f.write(f"├── Passed: {passed:>4} ({(passed/total_tests)*100 if total_tests > 0 else 0:.1f}%)\n")
            f.write(f"├── Failed: {total_failures:>4} ({(total_failures/total_tests)*100 if total_tests > 0 else 0:.1f}%)\n")
            f.write(f"├── Errors: {total_errors:>4} ({(total_errors/total_tests)*100 if total_tests > 0 else 0:.1f}%)\n")
            f.write(f"└── Skipped: {total_skipped:>4} ({(total_skipped/total_tests)*100 if total_tests > 0 else 0:.1f}%)\n")
            f.write("```\n\n")


            # Provider summary table
            f.write("## Results by Provider\n\n")
            f.write("| Provider | Total | Pass | Fail | Error | Skip | Pass Rate | Duration |\n")
            f.write("|----------|-------|------|------|-------|------|-----------|----------|")

            # Sort providers: specific providers first, then cross-provider tests
            sorted_providers = []
            cross_provider = []
            for p in sorted(provider_stats.keys()):
                if 'Cross-Provider' in p or p == 'Other Tests':
                    cross_provider.append(p)
                else:
                    sorted_providers.append(p)

            all_providers = sorted_providers + cross_provider

            for provider in all_providers:
                stats = provider_stats[provider]
                total = stats['passed'] + stats['failed'] + stats['errors'] + stats['skipped']
                pass_rate = (stats['passed'] / total * 100) if total > 0 else 0

                f.write(f"\n| {provider} | {total} | {stats['passed']} | {stats['failed']} | ")
                f.write(f"{stats['errors']} | {stats['skipped']} | {pass_rate:.1f}% | ")
                f.write(f"{format_duration(stats['time'])} |")

            # Detailed test results by provider
            f.write("\n\n## Detailed Test Results\n\n")

            for provider in sorted_providers:
                if provider_tests[provider]:
                    stats = provider_stats[provider]
                    total = stats['passed'] + stats['failed'] + stats['errors'] + stats['skipped']

                    f.write(f"### {provider}\n\n")
                    f.write(f"**Summary:** {stats['passed']}/{total} passed ")
                    f.write(f"({(stats['passed']/total)*100 if total > 0 else 0:.1f}%) ")
                    f.write(f"in {format_duration(stats['time'])}\n\n")

                    # Group tests by status
                    tests_by_status = defaultdict(list)
                    for test in provider_tests[provider]:
                        tests_by_status[test['status']].append(test)

                    # Show failed tests first (if any)
                    if tests_by_status['FAILED']:
                        f.write("<details>\n<summary>Failed Tests</summary>\n\n")
                        for test in tests_by_status['FAILED']:
                            f.write(f"- `{test['name']}` ({test['time']:.2f}s)\n")
                            if test['message']:
                                # Truncate long error messages
                                msg = test['message'][:200] + '...' if len(test['message']) > 200 else test['message']
                                f.write(f"  > {msg}\n")
                        f.write("\n</details>\n\n")

                    # Show errors (if any)
                    if tests_by_status['ERROR']:
                        f.write("<details>\n<summary>Error Tests</summary>\n\n")
                        for test in tests_by_status['ERROR']:
                            f.write(f"- `{test['name']}` ({test['time']:.2f}s)\n")
                        f.write("\n</details>\n\n")

                    # Show passed tests in collapsible section
                    if tests_by_status['PASSED']:
                        f.write("<details>\n<summary>Passed Tests</summary>\n\n")
                        for test in tests_by_status['PASSED']:
                            f.write(f"- `{test['name']}` ({test['time']:.2f}s)\n")
                        f.write("\n</details>\n\n")

                    # Show skipped tests (if any)
                    if tests_by_status['SKIPPED']:
                        f.write("<details>\n<summary>Skipped Tests</summary>\n\n")
                        for test in tests_by_status['SKIPPED']:
                            f.write(f"- `{test['name']}`\n")
                        f.write("\n</details>\n\n")

            # Cross-provider tests in a separate section
            if cross_provider:
                f.write("### Cross-Provider Tests\n\n")
                for provider in cross_provider:
                    if provider_tests[provider]:
                        stats = provider_stats[provider]
                        total = stats['passed'] + stats['failed'] + stats['errors'] + stats['skipped']

                        f.write(f"#### {provider}\n\n")
                        f.write(f"**Summary:** {stats['passed']}/{total} passed ")
                        f.write(f"({(stats['passed']/total)*100 if total > 0 else 0:.1f}%)\n\n")

                        # For cross-provider tests, just show counts
                        f.write(f"- Passed: {stats['passed']}\n")
                        if stats['failed'] > 0:
                            f.write(f"- Failed: {stats['failed']}\n")
                        if stats['errors'] > 0:
                            f.write(f"- Errors: {stats['errors']}\n")
                        if stats['skipped'] > 0:
                            f.write(f"- Skipped: {stats['skipped']}\n")
                        f.write("\n")


        print_colored(f"Report generated: {output_path}", Colors.GREEN)

    except Exception as e:
        print_colored(f"Error generating report: {e}", Colors.RED)
        raise

def run_tests(test_path: str = "tests/llm_translation/",
              junit_xml: str = "test-results/junit.xml",
              report_path: str = "test-results/llm_translation_report.md",
              tag: str = None,
              commit: str = None) -> int:
    """Run the LLM translation tests and generate report"""

    # Create test results directory
    os.makedirs(os.path.dirname(junit_xml), exist_ok=True)

    print_colored("Starting LLM Translation Tests", Colors.BOLD + Colors.BLUE)
    print_colored(f"Test directory: {test_path}", Colors.CYAN)
    print_colored(f"Output: {junit_xml}", Colors.CYAN)
    print()

    # Run pytest
    cmd = [
        "poetry", "run", "pytest", test_path,
        f"--junitxml={junit_xml}",
        "-v",
        "--tb=short",
        "--maxfail=500",
        "-n", "auto"
    ]

    # Add timeout if pytest-timeout is installed
    try:
        subprocess.run(["poetry", "run", "python", "-c", "import pytest_timeout"],
                       capture_output=True, check=True)
        cmd.extend(["--timeout=300"])
    except Exception:
        print_colored("Warning: pytest-timeout not installed, skipping timeout option", Colors.YELLOW)

    print_colored("Running pytest with command:", Colors.YELLOW)
    print(f"  {' '.join(cmd)}")
    print()

    # Run the tests
    result = subprocess.run(cmd, capture_output=False)

    # Generate the report regardless of test outcome
    if os.path.exists(junit_xml):
        print()
        print_colored("Generating test report...", Colors.BLUE)
        generate_markdown_report(junit_xml, report_path, tag, commit)

        # Print summary to console
        print()
        print_colored("Test Summary:", Colors.BOLD + Colors.PURPLE)

        # Parse XML for quick summary
        tree = ET.parse(junit_xml)
        root = tree.getroot()

        if root.tag == 'testsuites':
            suites = root.findall('testsuite')
        else:
            suites = [root]

        total = sum(int(s.get('tests', 0)) for s in suites)
        failures = sum(int(s.get('failures', 0)) for s in suites)
        errors = sum(int(s.get('errors', 0)) for s in suites)
        skipped = sum(int(s.get('skipped', 0)) for s in suites)
        passed = total - failures - errors - skipped

        print(f"  Total: {total}")
        print_colored(f"  Passed: {passed}", Colors.GREEN)
        if failures > 0:
            print_colored(f"  Failed: {failures}", Colors.RED)
        if errors > 0:
            print_colored(f"  Errors: {errors}", Colors.RED)
        if skipped > 0:
            print_colored(f"  Skipped: {skipped}", Colors.YELLOW)

        if total > 0:
            pass_rate = (passed / total) * 100
            color = Colors.GREEN if pass_rate >= 80 else Colors.YELLOW if pass_rate >= 60 else Colors.RED
            print_colored(f"  Pass Rate: {pass_rate:.1f}%", color)
    else:
        print_colored("No test results found!", Colors.RED)

    print()
    print_colored("Test run complete!", Colors.BOLD + Colors.GREEN)

    return result.returncode

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Run LLM Translation Tests")
    parser.add_argument("--test-path", default="tests/llm_translation/",
                        help="Path to test directory")
    parser.add_argument("--junit-xml", default="test-results/junit.xml",
                        help="Path for JUnit XML output")
    parser.add_argument("--report", default="test-results/llm_translation_report.md",
                        help="Path for markdown report")
    parser.add_argument("--tag", help="Git tag or version")
    parser.add_argument("--commit", help="Git commit SHA")

    args = parser.parse_args()

    # Get git info if not provided
    if not args.commit:
        try:
            result = subprocess.run(["git", "rev-parse", "HEAD"],
                                    capture_output=True, text=True)
            if result.returncode == 0:
                args.commit = result.stdout.strip()
        except Exception:
            pass

    if not args.tag:
        try:
            result = subprocess.run(["git", "describe", "--tags", "--abbrev=0"],
                                    capture_output=True, text=True)
            if result.returncode == 0:
                args.tag = result.stdout.strip()
        except Exception:
            pass

    exit_code = run_tests(
        test_path=args.test_path,
        junit_xml=args.junit_xml,
        report_path=args.report,
        tag=args.tag,
        commit=args.commit
    )

    sys.exit(exit_code)
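Since generate_markdown_report only needs a JUnit XML file, the reporting half of the script can be exercised without running the test suite. A minimal sketch (the fixture below is hand-written for illustration, and assumes the script's directory is on sys.path so it imports as run_llm_translation_tests):

# exercise the report generator against a tiny hand-written JUnit XML fixture
from pathlib import Path

from run_llm_translation_tests import generate_markdown_report

Path("test-results").mkdir(exist_ok=True)
Path("test-results/junit.xml").write_text(
    '<testsuite tests="2" failures="1" errors="0" skipped="0" time="1.5">'
    '<testcase classname="tests.test_anthropic.TestChat" name="test_ok" time="1.0"/>'
    '<testcase classname="tests.test_openai.TestChat" name="test_bad" time="0.5">'
    '<failure message="boom"/></testcase>'
    '</testsuite>'
)
generate_markdown_report("test-results/junit.xml", "test-results/report.md",
                         tag="v0.0.0-test", commit="deadbeef")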
67
Development/litellm/.github/workflows/simple_pypi_publish.yml
vendored
Normal file
@@ -0,0 +1,67 @@
name: Simple PyPI Publish

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish (e.g., 1.74.10)'
        required: true
        type: string

env:
  TWINE_USERNAME: __token__

jobs:
  publish:
    runs-on: ubuntu-latest
    if: github.repository == 'BerriAI/litellm'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install toml build wheel twine

      - name: Update version in pyproject.toml
        run: |
          python -c "
          import toml

          with open('pyproject.toml', 'r') as f:
              data = toml.load(f)

          data['tool']['poetry']['version'] = '${{ github.event.inputs.version }}'

          with open('pyproject.toml', 'w') as f:
              toml.dump(data, f)

          print(f'Updated version to ${{ github.event.inputs.version }}')
          "

      - name: Copy model prices file
        run: |
          cp model_prices_and_context_window.json litellm/model_prices_and_context_window_backup.json

      - name: Build package
        run: |
          rm -rf build dist
          python -m build

      - name: Publish to PyPI
        env:
          TWINE_PASSWORD: ${{ secrets.PYPI_PUBLISH_PASSWORD }}
        run: |
          twine upload dist/*

      - name: Output success
        run: |
          echo "✅ Successfully published litellm v${{ github.event.inputs.version }} to PyPI"
          echo "📦 Package: https://pypi.org/project/litellm/${{ github.event.inputs.version }}/"
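The inline python -c block above is easiest to test outside CI as a standalone script (a sketch; NEW_VERSION stands in for the workflow's github.event.inputs.version):

# local dry run of the workflow's version-bump step
# NEW_VERSION stands in for github.event.inputs.version
import toml

NEW_VERSION = "1.74.10"

with open("pyproject.toml", "r") as f:
    data = toml.load(f)

data["tool"]["poetry"]["version"] = NEW_VERSION

with open("pyproject.toml", "w") as f:
    toml.dump(data, f)

print(f"Updated version to {NEW_VERSION}")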
20
Development/litellm/.github/workflows/stale.yml
vendored
Normal file
@@ -0,0 +1,20 @@
name: "Stale Issue Management"
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * *' # Runs daily at midnight UTC
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v8
|
||||
with:
|
||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
stale-issue-message: "This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs."
|
||||
stale-pr-message: "This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs."
|
||||
days-before-stale: 90 # Revert to 60 days
|
||||
days-before-close: 7 # Revert to 7 days
|
||||
stale-issue-label: "stale"
|
||||
operations-per-run: 1000
|
57
Development/litellm/.github/workflows/test-linting.yml
vendored
Normal file
@@ -0,0 +1,57 @@
name: LiteLLM Linting

on:
  pull_request:
    branches: [ main ]

jobs:
  lint:
    runs-on: ubuntu-latest
    timeout-minutes: 5

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'

      - name: Install Poetry
        uses: snok/install-poetry@v1

      - name: Install dependencies
        run: |
          pip install openai==1.99.5
          poetry install --with dev
          pip install openai==1.99.5

      - name: Run Black formatting
        run: |
          cd litellm
          poetry run black .
          cd ..

      - name: Run Ruff linting
        run: |
          cd litellm
          poetry run ruff check .
          cd ..

      - name: Run MyPy type checking
        run: |
          cd litellm
          poetry run mypy . --ignore-missing-imports
          cd ..

      - name: Check for circular imports
        run: |
          cd litellm
          poetry run python ../tests/documentation_tests/test_circular_imports.py
          cd ..

      - name: Check import safety
        run: |
          poetry run python -c "from litellm import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1)
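The final step fails the build if importing litellm raises, which is how unprotected optional imports get caught. The guarded pattern it enforces usually looks like this sketch (some_optional_dep is a made-up name, not a real dependency):

# hedged sketch of the guarded-import pattern the "Check import safety" step enforces
# some_optional_dep is a hypothetical optional dependency
try:
    import some_optional_dep
except ImportError:
    some_optional_dep = None  # degrade gracefully when the extra isn't installed

def feature():
    if some_optional_dep is None:
        raise RuntimeError("install some_optional_dep to use this feature")
    return some_optional_dep.do_work()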
42
Development/litellm/.github/workflows/test-litellm.yml
vendored
Normal file
@@ -0,0 +1,42 @@
name: LiteLLM Mock Tests (folder - tests/test_litellm)

on:
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 25

    steps:
      - uses: actions/checkout@v4

      - name: Thank You Message
        run: |
          echo "### 🙏 Thank you for contributing to LiteLLM!" >> $GITHUB_STEP_SUMMARY
          echo "Your PR is being tested now. We appreciate your help in making LiteLLM better!" >> $GITHUB_STEP_SUMMARY

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.12'

      - name: Install Poetry
        uses: snok/install-poetry@v1

      - name: Install dependencies
        run: |
          poetry install --with dev,proxy-dev --extras "proxy semantic-router"
          poetry run pip install "pytest-retry==1.6.3"
          poetry run pip install pytest-xdist
          poetry run pip install "google-genai==1.22.0"
          poetry run pip install "fastapi-offline==1.7.3"
      - name: Setup litellm-enterprise as local package
        run: |
          cd enterprise
          python -m pip install -e .
          cd ..
      - name: Run tests
        run: |
          poetry run pytest tests/test_litellm -x -vv -n 4
54
Development/litellm/.github/workflows/update_release.py
vendored
Normal file
@@ -0,0 +1,54 @@
import os
import requests
from datetime import datetime

# GitHub API endpoints
GITHUB_API_URL = "https://api.github.com"
REPO_OWNER = "BerriAI"
REPO_NAME = "litellm"

# GitHub personal access token (required for uploading release assets)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN")

# Headers for GitHub API requests
headers = {
    "Accept": "application/vnd.github+json",
    "Authorization": f"Bearer {GITHUB_ACCESS_TOKEN}",
    "X-GitHub-Api-Version": "2022-11-28",
}

# Get the latest release
releases_url = f"{GITHUB_API_URL}/repos/{REPO_OWNER}/{REPO_NAME}/releases/latest"
response = requests.get(releases_url, headers=headers)
response.raise_for_status()  # fail early instead of KeyError-ing on a bad lookup
latest_release = response.json()
print("Latest release:", latest_release)

# Upload an asset to the latest release
upload_url = latest_release["upload_url"].split("{?")[0]
asset_name = "results_stats.csv"
asset_path = os.path.join(os.getcwd(), asset_name)
print("upload_url:", upload_url)

with open(asset_path, "rb") as asset_file:
    asset_data = asset_file.read()

upload_payload = {
    "name": asset_name,
    "label": "Load test results",
    "created_at": datetime.utcnow().isoformat() + "Z",
}

upload_headers = headers.copy()
upload_headers["Content-Type"] = "application/octet-stream"

upload_response = requests.post(
    upload_url,
    headers=upload_headers,
    data=asset_data,
    params=upload_payload,
)

if upload_response.status_code == 201:
    print(f"Asset '{asset_name}' uploaded successfully to the latest release.")
else:
    print(f"Failed to upload asset. Response: {upload_response.text}")
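One detail worth knowing when reading the script above: the release JSON's upload_url is an RFC 6570 URI template, which is why it is split on "{?" before POSTing (the URL below is illustrative, not a real release):

# why the script splits upload_url on "{?": the API returns a URI template
upload_url_template = (
    "https://uploads.github.com/repos/BerriAI/litellm/releases/1/assets{?name,label}"
)
upload_url = upload_url_template.split("{?")[0]
assert upload_url.endswith("/assets")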