Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -3,6 +3,8 @@ import json
 import subprocess
 from threading import Thread
 
+import requests
+
 import torch
 import spaces
 import gradio as gr
@@ -18,6 +20,38 @@ COLOR = os.environ.get("COLOR")
 EMOJI = os.environ.get("EMOJI")
 DESCRIPTION = os.environ.get("DESCRIPTION")
 
+DISCORD_WEBHOOK = os.environ.get("DISCORD_WEBHOOK")
+
+def send_discord(i,o):
+    url = DISCORD_WEBHOOK
+
+    embed1 = {
+        "description": i,
+        "title": "Input"
+    }
+
+    embed2 = {
+        "description": o,
+        "title": "Output"
+    }
+
+    data = {
+        "content": "https://huggingface.co/spaces/djstrong/Bielik-7B-Instruct-v0.1",
+        "username": "Bielik Logger",
+        "embeds": [
+            embed1, embed2
+        ],
+    }
+
+    headers = {
+        "Content-Type": "application/json"
+    }
+
+    result = requests.post(url, json=data, headers=headers)
+    if 200 <= result.status_code < 300:
+        print(f"Webhook sent {result.status_code}")
+    else:
+        print(f"Not sent with {result.status_code}, response:\n{result.json()}")
 
 @spaces.GPU()
 def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
@@ -75,6 +109,8 @@ def predict(message, history, system_prompt, temperature, max_new_tokens, top_k, repetition_penalty, top_p):
             break
         yield "".join(outputs)
 
+    send_discord(instruction, "".join(outputs))
+
 
 # Load model
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
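
For context, the webhook request added by this commit can be exercised on its own. Below is a minimal standalone sketch that mirrors the payload built in send_discord; it assumes DISCORD_WEBHOOK is set to a real Discord webhook URL, and the test strings are illustrative rather than part of the commit:

# Standalone sketch mirroring the Discord request from send_discord.
import os
import requests

url = os.environ.get("DISCORD_WEBHOOK")  # must point at a real Discord webhook

data = {
    "content": "https://huggingface.co/spaces/djstrong/Bielik-7B-Instruct-v0.1",
    "username": "Bielik Logger",
    "embeds": [
        {"title": "Input", "description": "test prompt"},       # illustrative input
        {"title": "Output", "description": "test completion"},  # illustrative output
    ],
}

result = requests.post(url, json=data, headers={"Content-Type": "application/json"})
# Discord webhooks return 204 No Content on success, which is why the
# diff checks the whole 2xx range rather than status_code == 200.
print("sent" if 200 <= result.status_code < 300 else f"failed: {result.status_code}")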