ghost717 committed
Commit • c6e1096 • 1 Parent(s): c4ef1ac
test
Browse files:
- chat-8-api.py +2 -2
- chat-9-api.py +17 -6
- chat-groq.py +59 -0
- chat.py +2 -1
chat-8-api.py
CHANGED
@@ -10,8 +10,8 @@ load_dotenv()
 
 OPENAI_API_KEY = os.environ['GROQ_API_KEY']
 
-
-llm =
+llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
+# llm = ChatOpenAI(temperature=1.0, model='gpt-4-turbo')
 # llm = openai(temperature=1.0, model='gpt-4o')
 
 def predict(message, history):
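Note that `def predict(message, history):` appears only as a context line, so the commit does not show how the new `llm` object is consumed. Below is a minimal sketch of how a LangChain chat model configured this way is typically wired into a Gradio streaming callback, assuming `langchain_openai` and Gradio-style `(user, assistant)` history tuples; the whole body is an illustration, not the commit's actual implementation.

# Hypothetical streaming predict() around the llm configured above
# (illustration only; not part of this commit).
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage, HumanMessage

llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')

def predict(message, history):
    # Rebuild the conversation as LangChain message objects.
    lc_messages = []
    for user_turn, assistant_turn in history:
        lc_messages.append(HumanMessage(content=user_turn))
        lc_messages.append(AIMessage(content=assistant_turn))
    lc_messages.append(HumanMessage(content=message))

    # Yield growing partial replies so Gradio can render them live.
    partial = ""
    for chunk in llm.stream(lc_messages):
        partial += chunk.content
        yield partial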
chat-9-api.py
CHANGED
@@ -1,8 +1,15 @@
 from openai import OpenAI
 import gradio as gr
+import os
+import dotenv
 
-
-
+dotenv.load_dotenv()
+
+OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
+
+print(OPENAI_API_KEY)
+
+client = OpenAI(api_key=OPENAI_API_KEY)
 
 def predict(message, history):
     history_openai_format = []
@@ -11,10 +18,14 @@ def predict(message, history):
         history_openai_format.append({"role": "assistant", "content":assistant})
     history_openai_format.append({"role": "user", "content": message})
 
-    response = client.chat.completions.create(
-
-
-
+    response = client.chat.completions.create(
+        # model='gpt-3.5-turbo',
+        # model="gpt-4-turbo",
+        model="gpt-4o",
+        messages= history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
 
     partial_message = ""
     for chunk in response:
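The hunk ends on the `for chunk in response:` context line. With `stream=True`, the chat-completions call returns an iterator of chunks whose text lives in `choices[0].delta.content`; a sketch of the assumed loop body follows (only its first two lines appear in the diff itself).

# Assumed continuation of the streaming loop shown as context above.
partial_message = ""
for chunk in response:
    delta = chunk.choices[0].delta.content   # None on the final chunk
    if delta is not None:
        partial_message += delta
        yield partial_message                # lets Gradio render progress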
chat-groq.py
ADDED
@@ -0,0 +1,59 @@
+import streamlit as st
+import os
+from groq import Groq
+import random
+
+from langchain.chains import ConversationChain
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain_groq import ChatGroq
+from langchain.prompts import PromptTemplate
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+groq_api_key = os.environ['GROQ_API_KEY']
+
+def main():
+
+    st.title("Groq Chat App")
+
+    # Add customization options to the sidebar
+    st.sidebar.title('Select an LLM')
+    model = st.sidebar.selectbox(
+        'Choose a model',
+        ['mixtral-8x7b-32768', 'llama2-70b-4096']
+    )
+    conversational_memory_length = st.sidebar.slider('Conversational memory length:', 1, 10, value = 5)
+
+    memory=ConversationBufferWindowMemory(k=conversational_memory_length)
+
+    user_question = st.text_area("Ask a question:")
+
+    # session state variable
+    if 'chat_history' not in st.session_state:
+        st.session_state.chat_history=[]
+    else:
+        for message in st.session_state.chat_history:
+            memory.save_context({'input':message['human']},{'output':message['AI']})
+
+
+    # Initialize Groq Langchain chat object and conversation
+    groq_chat = ChatGroq(
+        groq_api_key=groq_api_key,
+        model_name=model
+    )
+
+    conversation = ConversationChain(
+        llm=groq_chat,
+        memory=memory
+    )
+
+    if user_question:
+        response = conversation(user_question)
+        message = {'human':user_question,'AI':response['response']}
+        st.session_state.chat_history.append(message)
+        st.write("Chatbot:", response['response'])
+
+if __name__ == "__main__":
+    main()
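In the new file, the sidebar slider feeds `k` into `ConversationBufferWindowMemory`, so only the last `k` exchanges are replayed into the prompt on each rerun. A standalone illustration of that windowing behaviour (not part of the commit):

# Illustration only: the window drops everything past the last k turns.
from langchain.chains.conversation.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=2)
memory.save_context({'input': 'first'}, {'output': 'one'})
memory.save_context({'input': 'second'}, {'output': 'two'})
memory.save_context({'input': 'third'}, {'output': 'three'})

# With k=2, only the 'second' and 'third' exchanges survive.
print(memory.load_memory_variables({})['history'])

Assuming GROQ_API_KEY is present in the local .env, the app would be launched with `streamlit run chat-groq.py`.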
chat.py
CHANGED
@@ -8,8 +8,9 @@ OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
 client = OpenAI()
 
 response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
     # model="gpt-4-turbo",
-
+    # model="gpt-4o",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
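This call is non-streaming, so the reply would presumably be read from the first choice once the request returns; the diff does not show that part, so the line below is an assumption.

# Assumed consumption of the response object built above.
print(response.choices[0].message.content)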