oflakne26 committed
Commit 9ab2ecb · verified · 1 Parent(s): cdca6a5

Update main.py

Files changed (1):
  1. main.py +9 -12
main.py CHANGED
@@ -23,7 +23,6 @@ class InputData(BaseModel):
     end_token: str
     system_prompt: str
     user_input: str
-    json_prompt: str
     history: str = ""
 
 class WordCheckData(BaseModel):
@@ -34,18 +33,14 @@ class WordCheckData(BaseModel):
 async def generate_response(data: InputData) -> Dict[str, Any]:
     client = InferenceClient(model=data.model, token=HF_TOKEN)
 
-    sentences = tokenizer.tokenize(data.user_input)
+    user_sentences = tokenizer.tokenize(data.user_input)
 
-    this_is_a_string = ""
+    user_input_str = "\n".join(user_sentences)
 
-    for sentence in sentences:
-        this_is_a_string += "\n" + sentence
-
-    data.history += data.prompt_template.replace("{Prompt}", str(this_is_a_string))
+    data.history += data.prompt_template.replace("{Prompt}", user_input_str)
 
     inputs = (
         data.system_prompt_template.replace("{SystemPrompt}", data.system_prompt) +
-        data.system_prompt_template.replace("{SystemPrompt}", data.json_prompt) +
         data.history
     )
 
@@ -61,14 +56,16 @@ async def generate_response(data: InputData) -> Dict[str, Any]:
 
     response_str = str(response)
 
-    sentences = tokenizer.tokenize(response_str)
+    ai_sentences = tokenizer.tokenize(response_str)
 
     cleaned_response = {
-        "New response": sentences,
-        "Sentence count": min(len(sentences), 3)
+        "New response": ai_sentences,
+        "Sentence count": min(len(ai_sentences), 3)
     }
 
-    data.history += str(cleaned_response)
+    ai_response_str = "\n".join(ai_sentences)
+
+    data.history += ai_response_str + "\n"
 
     return {
         "response": cleaned_response,