import os

from llm_utils.client import get_openai_client

MODEL = "gpt-4o"

# Shared client instance, configured via the CONFIG_PATH environment variable
client = get_openai_client(
    model=MODEL,
    config_path=os.environ.get("CONFIG_PATH")
)

class ChatGPT:
    def __init__(self, model=MODEL):
        self.model = model
        self.client = client
        self.messages = []  # running conversation history, re-sent with every request

    def chat_with_gpt(self, user_input: str):
        # Append the user's turn to the history before querying the model
        self.messages.append({
            "role": "user",
            "content": user_input
        })
        response = self._generate_response(self.messages)
        return response

    def _generate_response(self, messages):
        # Send the full message history; a low temperature keeps answers focused
        response = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=0.2,
            max_tokens=150,
            top_p=1.0
        )
        response_message = response.choices[0].message
        # Store the assistant's reply so future turns are answered in context
        self.messages.append({
            "role": response_message.role,
            "content": response_message.content
        })
        return response_message.content

Exercise: GPT Chatbot
Task: Create a simple chatbot using the OpenAI chat.completions API.
Instructions:
- Use the chat.completions API to send prompts to GPT, receive the answers, and display them.
- Stop the conversation when the user inputs the word exit instead of a new prompt.
- Hint: Remember that GPT has no memory, so you always have to include the previous conversation in your prompts (see the sketch after this list).
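
For example, the core of this pattern (a minimal sketch using the client and MODEL defined above, independent of the ChatGPT class) looks like this:

# The API is stateless, so every request must re-send the entire conversation
history = []
history.append({"role": "user", "content": "Hello"})
response = client.chat.completions.create(model=MODEL, messages=history)
reply = response.choices[0].message
# Append the assistant's reply so the next request carries the full context
history.append({"role": reply.role, "content": reply.content})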
Solution:
# Conversation loop
chat_gpt = ChatGPT(model="gpt-4o")
while True:
    user_input = input("User: ")
    if user_input.lower() == 'exit':
        break
    
    print("User:", user_input)
    
    # Get bot response based on user input
    bot_response = chat_gpt.chat_with_gpt(user_input)
    print("Bot:", bot_response)User: Hello
Bot: Hi there! How can I assist you today?
User: 
Bot: It looks like your message got cut off. How can I help you today?
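
After this exchange, inspecting chat_gpt.messages would show the history that gets re-sent on every call, roughly (the assistant's wording varies between runs):

[
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there! How can I assist you today?"},
    {"role": "user", "content": ""},
    {"role": "assistant", "content": "It looks like your message got cut off. How can I help you today?"}
]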