diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_auto_model.py b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_auto_model.py
new file mode 100644
index 0000000000..f4b61f2aaa
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_auto_model.py
@@ -0,0 +1,23 @@
+# Ask OpenRouter's automatic router ("openrouter/auto") to pick a model for a prompt.
+import os
+import requests
+
+OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
+
+api_key = os.getenv("OPENROUTER_API_KEY")
+
+headers = {
+    "Authorization": f"Bearer {api_key}",
+    "Content-Type": "application/json"
+}
+payload = {
+    "model": "openrouter/auto",
+    "messages": [{"role": "user", "content": "Say hello in one sentence."}]
+}
+# timeout prevents hanging forever; raise_for_status surfaces HTTP errors clearly.
+response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=30)
+response.raise_for_status()
+data = response.json()
+
+print(f"Model: {data.get('model')}")
+print(f"Response: {data['choices'][0]['message']['content']}")
diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_specific_model.py b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_specific_model.py
new file mode 100644
index 0000000000..094c8fd874
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/ask_specific_model.py
@@ -0,0 +1,26 @@
+# Call one explicitly chosen model through OpenRouter.
+import os
+import requests
+
+OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
+
+api_key = os.getenv("OPENROUTER_API_KEY")
+
+headers = {
+    "Authorization": f"Bearer {api_key}",
+    "Content-Type": "application/json"
+}
+payload = {
+    "model": "openai/gpt-3.5-turbo",
+    "messages": [{"role": "user", "content": "Say hello in one sentence."}]
+}
+response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=30)
+response.raise_for_status()
+data = response.json()
+
+if model := data.get('model'):
+    print(f"Model: {model} by {data.get('provider', 'unknown')}")
+    print(f"Response: {data['choices'][0]['message']['content']}")
+else:
+    print("No model found in the response.")
+    print(f"Response: {data}")
diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/fallback_models.py b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/fallback_models.py
new file mode 100644
index 0000000000..b0bd6181a1
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/fallback_models.py
@@ -0,0 +1,35 @@
+# Send one request listing several models; OpenRouter falls back through them in order.
+import os
+import requests
+
+OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
+
+api_key = os.getenv("OPENROUTER_API_KEY")
+
+def make_request_with_fallback(models_list, messages):
+    """POST a chat completion using the `models` array for server-side fallback."""
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json"
+    }
+    payload = {"models": models_list, "messages": messages}
+
+    return requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=30)
+
+response = make_request_with_fallback(
+    models_list=[
+        "openai/gpt-5",
+        "openai/gpt-3.5-turbo",
+        "openai/gpt-3.5-turbo-16k"
+    ],
+    messages=[{"role": "user", "content": "What is the capital of France?"}]
+)
+response.raise_for_status()
+
+data = response.json()
+if model := data.get('model'):
+    print(f"Model: {model} by {data.get('provider', 'unknown')}")
+    print(f"Response: {data['choices'][0]['message']['content']}")
+else:
+    print("No model found in the response.")
+    print(f"Response: {data}")
diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/get_models.py b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/get_models.py
new file mode 100644
index 0000000000..f4dcfe4616
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/get_models.py
@@ -0,0 +1,16 @@
+# List the models available through the OpenRouter API.
+import os
+import requests
+
+OPENROUTER_MODELS_URL = "https://openrouter.ai/api/v1/models"
+
+api_key = os.getenv("OPENROUTER_API_KEY")
+
+headers = {"Authorization": f"Bearer {api_key}"}
+response = requests.get(OPENROUTER_MODELS_URL, headers=headers, timeout=30)
+response.raise_for_status()
+data = response.json()
+
+models = data.get("data", [])
+print(f"Success! Found {len(models)} models via OpenRouter.")
+print(f"Examples: {', '.join(m['id'] for m in models[:5])}")
diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/requirements.txt b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/requirements.txt
new file mode 100644
index 0000000000..663bd1f6a2
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/requirements.txt
@@ -0,0 +1 @@
+requests
diff --git a/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/route_requests.py b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/route_requests.py
new file mode 100644
index 0000000000..04a423292b
--- /dev/null
+++ b/how-to-use-openrouter-to-access-multiple-ai-models-in-one-python-script/route_requests.py
@@ -0,0 +1,34 @@
+# Steer OpenRouter's provider routing (e.g. cheapest provider first) for one model.
+import os
+import requests
+
+OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
+
+api_key = os.getenv("OPENROUTER_API_KEY")
+
+def make_request(model, messages, provider_config=None):
+    """POST a chat completion for `model`, optionally passing provider routing prefs."""
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json"
+    }
+    payload = {"model": model, "messages": messages}
+    if provider_config:
+        payload["provider"] = provider_config
+
+    response = requests.post(OPENROUTER_API_URL, headers=headers, json=payload, timeout=30)
+    response.raise_for_status()
+    return response.json()
+
+data = make_request(
+    model="meta-llama/llama-3.1-70b-instruct",
+    messages=[{"role": "user", "content": "Explain AI in one sentence."}],
+    provider_config={"sort": "price"}
+)
+
+if model := data.get('model'):
+    print(f"Model: {model} by {data.get('provider', 'unknown')}")
+    print(f"Response: {data['choices'][0]['message']['content']}")
+else:
+    print("No model found in the response.")
+    print(f"Response: {data}")