### app.py

```python
import socket

import gradio as gr
from openai import OpenAI


def get_local_ip():
    """Return this machine's LAN IP via a UDP socket (no packet is actually sent)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
    except Exception:
        ip = "127.0.0.1"
    finally:
        s.close()
    return ip


print("Local IP:", get_local_ip())
# ✅ Point the client at the local llama.cpp OpenAI-compatible API.
# Note: 0.0.0.0 is a bind address, not a destination; connect to 127.0.0.1 instead.
client = OpenAI(
    base_url="http://127.0.0.1:8000/v1",
    api_key="sk-local",  # llama.cpp does not validate the key; any non-empty string works
    timeout=600,
)
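
# Assumption (not stated in the original): the llama.cpp HTTP server is expected
# to be running already, e.g. started with something like
#   llama-server -m ./model.gguf --port 8000
# (the binary and flag names can differ between llama.cpp versions).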
# ✅ Response function (a streaming generator)
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # history is a list of dicts: [{"role": "user"/"assistant", "content": "..."}]
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)  # append the previous turns as-is
    messages.append({"role": "user", "content": message})
    try:
        stream = client.chat.completions.create(
            model="qwen3",  # ⚠️ replace with the general.name of the model loaded in llama.cpp
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        output = ""
        for chunk in stream:
            # print("[DEBUG] chunk:", chunk)
            delta = chunk.choices[0].delta
            if delta:
                # Safely read content
                if delta.content:
                    output += delta.content
                # Safely read reasoning_content and skip it if absent
                # (uncomment to also show the model's chain of thought):
                # reasoning = getattr(delta, "reasoning_content", None)
                # if reasoning:
                #     output += reasoning
            # Yield the accumulated text on every chunk so the UI streams incrementally
            yield {"role": "assistant", "content": output}
    except Exception as e:
        print(f"[Error] {e}")
        yield {"role": "assistant", "content": "⚠️ The llama.cpp server is not responding. Please try again later."}
# ✅ Gradio interface (newer Gradio versions require type="messages")
demo = gr.ChatInterface(
    respond,
    type="messages",  # 🔑 use OpenAI-style message dicts
    additional_inputs=[
        gr.Textbox(value="You are a friendly assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
```
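
Before debugging the UI, it is worth confirming that the llama.cpp endpoint answers at all. The snippet below is a minimal sanity check, assuming the same `base_url`, dummy `api_key`, and `model` name as `app.py` above; adjust them to whatever your server actually exposes.

```python
# sanity_check.py — minimal non-streaming probe of the llama.cpp endpoint.
# Assumes the setup from app.py: base_url http://127.0.0.1:8000/v1, model "qwen3".
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:8000/v1", api_key="sk-local", timeout=60)

resp = client.chat.completions.create(
    model="qwen3",  # must match the model name the server reports
    messages=[{"role": "user", "content": "Say 'ok' and nothing else."}],
    max_tokens=8,
)
print(resp.choices[0].message.content)
```

If this prints a reply, the streaming path in `respond` should work as well. One further note: `demo.launch()` binds to 127.0.0.1 by default, so to reach the UI from another machine on the LAN (presumably why `get_local_ip()` prints the local IP), pass `demo.launch(server_name="0.0.0.0")`.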