import os
from llm_utils.client import get_openai_client
= "gpt4o"
MODEL
= get_openai_client(
client =MODEL,
model=os.environ.get("CONFIG_PATH")
config_path
)
class ChatGPT:
    """Minimal stateful chat wrapper around the chat.completions API.

    Keeps the full conversation in ``self.messages`` and resends it on every
    request, since the model itself has no memory between calls.
    """

    def __init__(self, model=MODEL):
        # Model name to use; defaults to the module-level MODEL constant.
        self.model = model
        # Shared module-level client (created at import time).
        self.client = client
        # Conversation history: list of {"role": ..., "content": ...} dicts.
        self.messages = []

    def chat_with_gpt(self, user_input: str) -> str:
        """Append the user's message to the history and return the reply text."""
        self.messages.append({
            "role": "user",
            "content": user_input,
        })
        # _generate_response also appends the assistant's reply to the history.
        return self._generate_response(self.messages)

    def _generate_response(self, messages) -> str:
        """Send the whole conversation to the API; record and return the reply.

        Returns the assistant message content (may be truncated at max_tokens).
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,   # low temperature: mostly deterministic answers
            max_tokens=150,    # short replies; raise if answers get cut off
            top_p=1.0,
        )
        response_message = response.choices[0].message
        # Store the assistant turn so the next request includes it.
        self.messages.append({
            "role": response_message.role,
            "content": response_message.content,
        })
        return response_message.content
Exercise: GPT Chatbot

Task: Create a simple chatbot using the OpenAI chat.completions API.

Instructions:
- Use the chat.completions API to send prompts to GPT, receive the answers, and display them.
- Stop the conversation when the user inputs the word "exit" instead of a new prompt.
- Hint: Remember that GPT has no memory, so you always have to include the previous conversation in your prompts.
Show solution
# Conversation loop
# NOTE(review): "gpt4" here differs from the module default "gpt4o" — confirm
# which model name the client wrapper actually expects.
chat_gpt = ChatGPT(model="gpt4")

while True:
    user_input = input("User: ")
    # The word "exit" (any casing) ends the conversation instead of being sent.
    if user_input.lower() == 'exit':
        break
    print("User:", user_input)
    # Get bot response based on user input
    bot_response = chat_gpt.chat_with_gpt(user_input)
    print("Bot:", bot_response)
User: Hello
Bot: Hi there! How can I assist you today?
User:
Bot: It looks like your message got cut off. How can I help you today?