Update app.py
app.py CHANGED
@@ -1,79 +1,51 @@
-from openai import OpenAI  # Assuming Nvidia client is available in the same library, adjust if necessary
 import streamlit as st
 import os
-from datetime import datetime
 
-# Initialize Nvidia client
-client = OpenAI(
-
-
 )
 
-# Sidebar with instructions and Clear Session button
-with st.sidebar:
-    # Instruction
-    st.markdown("### Instructions 🤖\nThis is a basic chatbot. Ask anything, and the AI will try to help you! The app is supported by Yiqiao Yin.")
-
-    # Add a section to ask the user for the response length
-    st.markdown("#### Select the desired length of the AI response:")
-    response_length = st.radio(
-        "How detailed do you want the response to be?",
-        ('Efficient', 'Medium', 'Academic')
-    )
-
-    # Set max_tokens based on user selection
-    if response_length == 'Efficient':
-        max_tokens = 100
-    elif response_length == 'Medium':
-        max_tokens = 600
-    else:  # 'Academic'
-        max_tokens = 1024
-
-    # Clear
-    if st.button("Clear Session"):
-        st.session_state.clear()
-    st.write(f"Copyright © 2010-{datetime.now().year} Present Yiqiao Yin")
-
-# Initialize session state variables if not already present
-if "nvidia_model" not in st.session_state:
-    st.session_state["nvidia_model"] = "nvidia/llama-3.1-nemotron-70b-instruct"
 
 if "messages" not in st.session_state:
-    st.session_state.messages.append({"role": "assistant", "content": response})
 import streamlit as st
 import os
+from openai import OpenAI
 
+# Initialize the Nvidia API client using API Key stored in Streamlit secrets
+client = OpenAI(
+    base_url = "https://integrate.api.nvidia.com/v1",
+    api_key = os.getenv("NVIDIA_API_KEY")
 )
 
+# Define Streamlit app layout
+st.title("AWS Well-Architected Review")
+st.write("Get recommendations for optimizing your AWS architecture.")
 
 if "messages" not in st.session_state:
+    st.session_state.messages = [
+        {"role": "system", "content": "You are an assistant that provides recommendations based on AWS Well-Architected Review best practices. Focus on the 5 pillars: Operational Excellence, Security, Reliability, Performance Efficiency, and Cost Optimization."}
+    ]
+
+# User input for AWS architecture description
+architecture_input = st.text_area("Describe your AWS architecture:")
+
+# Button to submit the input
+if st.button("Get Recommendations"):
+    if architecture_input:
+        # Add user input to the conversation
+        st.session_state.messages.append({"role": "user", "content": architecture_input})
+
+        with st.chat_message("assistant"):
+            with st.spinner("Generating recommendations..."):
+                # Create Nvidia completion request with conversation history
+                stream = client.chat.completions.create(
+                    model="nvidia/llama-3.1-nemotron-70b-instruct",  # Nvidia model name
+                    messages=st.session_state.messages,  # Include all messages in the API call
+                    temperature=0.5,
+                    top_p=0.7,
+                    max_tokens=1024,
+                    stream=True,
+                )
+
+                response_chunks = []
+                for chunk in stream:
+                    if chunk.choices[0].delta.content is not None:
+                        response_chunks.append(chunk.choices[0].delta.content)
+                response = "".join(response_chunks)
+
+                # Display the response as recommendations
+                st.markdown(f"**Recommendations:**\n\n{response}")
+                # Add response to conversation history
+                st.session_state.messages.append({"role": "assistant", "content": response})
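
The new code calls Nvidia's OpenAI-compatible endpoint, so the key and model can be exercised outside Streamlit before the Space is redeployed. The following is only an illustrative sketch, not part of this commit: it assumes the openai package is installed and that NVIDIA_API_KEY is set in the environment (on a Hugging Face Space, secrets are exposed to the app as environment variables).

# Illustrative sketch (not part of app.py): quick check that the key and
# endpoint used by the Space actually respond. Assumes `pip install openai`
# and NVIDIA_API_KEY exported in the environment.
import os
from openai import OpenAI

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.getenv("NVIDIA_API_KEY"),
)

completion = client.chat.completions.create(
    model="nvidia/llama-3.1-nemotron-70b-instruct",
    messages=[{"role": "user", "content": "Reply with a one-sentence greeting."}],
    max_tokens=32,
)
print(completion.choices[0].message.content)

If this prints a short reply, the NVIDIA_API_KEY secret is configured correctly and the streaming call in app.py should behave the same way.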