Update .gitattributes
Browse files- .gitattributes +18 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
# assistant.py
#
# Minimal terminal AI assistant built on Hugging Face transformers:
# loads a pretrained conversational causal LM and wraps it in a
# text-generation pipeline. The interactive chat loop that consumes
# `chat` and `conversation_history` follows this block in the file.

from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load pre-trained conversational model.
# You can swap with other models like "gpt2",
# "HuggingFaceH4/zephyr-7b-beta", etc.
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Initialize the generation pipeline around the loaded model/tokenizer.
chat = pipeline("text-generation", model=model, tokenizer=tokenizer)

# NOTE(review): "馃" looks like mojibake (probably a 🤖 emoji mangled by
# an encoding round-trip) — kept byte-for-byte here; confirm the
# intended character before changing this user-facing string.
print("馃 AI Assistant ready! Type something below:")

# Accumulated dialogue context, appended to by the chat loop below.
conversation_history = ""
| 51 |
+
while True:
|
| 52 |
+
try:
|
| 53 |
+
user_input
|