import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


class ZewAI3:
    def __init__(self, model_name="microsoft/phi-2"):
        """Load the base model and tokenizer, then build a text-generation pipeline."""
        print(f"Initializing ZewAI 3 based on {model_name}...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float32,
            trust_remote_code=True,
        )
        self.pipe = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)

    def generate_code(self, prompt, max_new_tokens=512):
        """Wrap the prompt in phi-2's Instruct/Output format and sample a completion."""
        formatted_prompt = f"Instruct: Write the following code: {prompt}\nOutput:"

        results = self.pipe(
            formatted_prompt,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
        )
        # The pipeline returns the full text, including the formatted prompt prefix.
        return results[0]["generated_text"]


if __name__ == "__main__":
    zew_model = ZewAI3()
    test_prompt = "Create a single-file HTML app with a dark mode toggle."
    print(zew_model.generate_code(test_prompt))