robertselvam committed on
Commit
742f3e7
1 Parent(s): 4fed321

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -66,7 +66,7 @@ class Resume_Overall:
66
 
67
  # Generate a response from the GPT-3 model
68
  chat_completion = self.client.chat.completions.create(
69
- model = "ChatGPT",
70
  messages = conversation,
71
  max_tokens=200,
72
  temperature=0,
@@ -93,7 +93,7 @@ class Resume_Overall:
93
 
94
  # Generate a response from the GPT-3 model
95
  chat_completion = self.client.chat.completions.create(
96
- model = "ChatGPT",
97
  messages = conversation,
98
  max_tokens=200,
99
  temperature=0,
@@ -122,7 +122,7 @@ class Resume_Overall:
122
 
123
  # Generate a response from the GPT-3 model
124
  chat_completion = self.client.chat.completions.create(
125
- model = "ChatGPT", # Choose the GPT-3 engine you want to use
126
  messages = conversation,
127
  max_tokens=100, # Set the maximum number of tokens in the generated response
128
  temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused
@@ -142,7 +142,7 @@ class Resume_Overall:
142
  ]
143
 
144
  chat_completion = self.client.chat.completions.create(
145
- model = "ChatGPT",
146
  messages = conversation,
147
  max_tokens=100,
148
  temperature=0,
@@ -175,7 +175,7 @@ class Resume_Overall:
175
  ]
176
 
177
  chat_completion = self.client.chat.completions.create(
178
- model = "ChatGPT", # Choose the GPT-3 engine you want to use
179
  messages = conversation,
180
  max_tokens=500, # Set the maximum number of tokens in the generated response
181
  temperature=0.5, # Controls the randomness of the output. Higher values = more random, lower values = more focused
@@ -202,7 +202,7 @@ class Resume_Overall:
202
 
203
  # Generate a response from the GPT-3 model
204
  chat_completion = self.client.chat.completions.create(
205
- model = "ChatGPT", # Choose the GPT-3 engine you want to use
206
  messages = conversation,
207
  max_tokens=200, # Set the maximum number of tokens in the generated response
208
  temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused
 
66
 
67
  # Generate a response from the GPT-3 model
68
  chat_completion = self.client.chat.completions.create(
69
+ model = "GPT-3",
70
  messages = conversation,
71
  max_tokens=200,
72
  temperature=0,
 
93
 
94
  # Generate a response from the GPT-3 model
95
  chat_completion = self.client.chat.completions.create(
96
+ model = "GPT-3",
97
  messages = conversation,
98
  max_tokens=200,
99
  temperature=0,
 
122
 
123
  # Generate a response from the GPT-3 model
124
  chat_completion = self.client.chat.completions.create(
125
+ model = "GPT-3", # Choose the GPT-3 engine you want to use
126
  messages = conversation,
127
  max_tokens=100, # Set the maximum number of tokens in the generated response
128
  temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused
 
142
  ]
143
 
144
  chat_completion = self.client.chat.completions.create(
145
+ model = "GPT-3",
146
  messages = conversation,
147
  max_tokens=100,
148
  temperature=0,
 
175
  ]
176
 
177
  chat_completion = self.client.chat.completions.create(
178
+ model = "GPT-3", # Choose the GPT-3 engine you want to use
179
  messages = conversation,
180
  max_tokens=500, # Set the maximum number of tokens in the generated response
181
  temperature=0.5, # Controls the randomness of the output. Higher values = more random, lower values = more focused
 
202
 
203
  # Generate a response from the GPT-3 model
204
  chat_completion = self.client.chat.completions.create(
205
+ model = "GPT-3", # Choose the GPT-3 engine you want to use
206
  messages = conversation,
207
  max_tokens=200, # Set the maximum number of tokens in the generated response
208
  temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused