Update utils/llms.py
utils/llms.py  CHANGED  (+2 -2)
@@ -5,7 +5,7 @@ import random
 import json
 import os
 
-from g4f.Provider import DeepInfraChat,LambdaChat
+from g4f.Provider import DeepInfraChat,LambdaChat
 
 gemini_api_keys=json.loads(os.environ.get("GEMINI_KEY_LIST"))
 groq_api_keys=json.loads(os.environ.get("GROQ_API_KEYS"))
@@ -18,7 +18,7 @@ deepinframodels=["meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8","microsoft/
 chutes_models={'Llama-4-Maverick-17B-128E-Instruct-FP8':'chutesai/Llama-4-Maverick-17B-128E-Instruct-FP8', "Qwen3-235B":"Qwen/Qwen3-235B-A22B","MAI-DS-R1-FP8":"microsoft/MAI-DS-R1-FP8","DeepSeek-V3-0324":"deepseek-ai/DeepSeek-V3-0324","deepseek-reasoner":"deepseek-ai/DeepSeek-R1-0528","GLM-4-32B-0414":"THUDM/GLM-4-32B-0414","GLM-Z1-32B-0414":"THUDM/GLM-Z1-32B-0414","DeepSeek-R1T-Chimera":"tngtech/DeepSeek-R1T-Chimera", "DeepSeek-R1-Zero":"deepseek-ai/DeepSeek-R1-Zero"}
 github_models={"gpt4.1":"gpt-4.1","gpt-4o":"gpt-4o","o4-mini":"o4-mini"}
 
-REASONING_CORRESPONDANCE = {"DeepSeekR1-LAMBDA":LambdaChat,"DeepSeekR1":DeepInfraChat
+REASONING_CORRESPONDANCE = {"DeepSeekR1-LAMBDA":LambdaChat,"DeepSeekR1":DeepInfraChat}
 os.environ["GEMINI_API_KEY"] =random.choice(gemini_api_keys)
 
 REASONING_QWQ = {"qwq-32b":DeepInfraChat}
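
The second hunk adds the closing brace that was missing from the REASONING_CORRESPONDANCE literal, which would otherwise raise a SyntaxError as soon as utils/llms.py is imported. Below is a minimal sketch of how such a provider map is typically looked up; the helper choose_reasoning_provider and the fallback to DeepInfraChat are assumptions for illustration, not code from this repository.

# Sketch only: assumes g4f is installed and exposes the DeepInfraChat and
# LambdaChat provider classes used in utils/llms.py. choose_reasoning_provider
# is a hypothetical helper, not part of the Space's code.
from g4f.Provider import DeepInfraChat, LambdaChat

REASONING_CORRESPONDANCE = {"DeepSeekR1-LAMBDA": LambdaChat, "DeepSeekR1": DeepInfraChat}
REASONING_QWQ = {"qwq-32b": DeepInfraChat}

def choose_reasoning_provider(model_name: str):
    """Return the g4f provider class mapped to a reasoning model name,
    falling back to DeepInfraChat when the name is unknown (assumed default)."""
    return REASONING_CORRESPONDANCE.get(model_name, DeepInfraChat)

if __name__ == "__main__":
    provider = choose_reasoning_provider("DeepSeekR1-LAMBDA")
    print(provider.__name__)  # prints "LambdaChat"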