# app.py — streaming OpenAI chatbot served with a Gradio ChatInterface.
# --- Environment & client setup -------------------------------------------
from openai import OpenAI
import gradio as gr
import os
import dotenv

# Pull OPENAI_API_KEY (and anything else) from a local .env file, if present.
dotenv.load_dotenv()

# Fail fast with a KeyError if the key is missing from the environment.
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
# NOTE(security): never print or log the raw API key — it ends up in console
# scrollback and hosted-app logs. The original debug `print` was removed.

client = OpenAI(api_key=OPENAI_API_KEY)
def predict(message, history):
    """Stream an OpenAI chat completion for *message* given the chat history.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior (user, assistant) message pairs, as supplied by
        ``gr.ChatInterface`` in tuple format.

    Yields
    ------
    str
        The assistant reply accumulated so far; Gradio re-renders the chat
        bubble on each yield, producing a "typewriter" streaming effect.
    """
    # Convert Gradio's (user, assistant) pairs into OpenAI's message schema.
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    # stream=True yields incremental deltas instead of one final response.
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    partial_message = ""
    for chunk in response:
        delta = chunk.choices[0].delta.content
        # Some chunks (role header, finish marker) carry no content; skip
        # them so we don't re-yield an unchanged string to the UI.
        if delta is not None:
            partial_message += delta
            yield partial_message
# Wire predict() into a streaming chat UI and start the web server.
# Intentionally unguarded (no __main__ check): hosting platforms such as
# Hugging Face Spaces execute this file directly and expect launch on import.
gr.ChatInterface(predict).launch()