InnerI committed
Commit 147b54b
1 Parent(s): 58e0a19

Create chat.py

Files changed (1)
chat.py +47 -0
chat.py ADDED
@@ -0,0 +1,47 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load the tokenizer and model
+ model_name = "InnerI/synCAI-144k-gpt2.5"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Check if GPU is available and move model to GPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model.to(device)
+
+ def generate_text(prompt, model, tokenizer, device, max_length=100, temperature=0.7, top_p=0.9, top_k=50):
+     try:
+         # Tokenize the input prompt
+         inputs = tokenizer(prompt, return_tensors="pt")
+         inputs = {key: value.to(device) for key, value in inputs.items()}
+
+         # Generate text
+         outputs = model.generate(
+             inputs['input_ids'],
+             max_length=max_length,
+             temperature=temperature,
+             top_p=top_p,
+             top_k=top_k
+         )
+
+         # Decode and return the generated text
+         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+         return generated_text
+     except Exception as e:
+         print(f"Error generating text for prompt '{prompt}': {e}")
+         return None
+
+ # Example input prompts
+ input_prompts = [
+     "Explain the significance of the project:",
+     "What methodologies were used in the research?",
+     "What are the future implications of the findings?"
+ ]
+
+ # Generate and print texts for each prompt
+ for prompt in input_prompts:
+     generated_text = generate_text(prompt, model, tokenizer, device)
+     if generated_text:
+         print(f"Prompt: {prompt}")
+         print(f"Generated Text: {generated_text}\n")