Create func_ai.py
func_ai.py ADDED (+58 -0)
@@ -0,0 +1,58 @@
import requests
import torch
from googletrans import Translator
from transformers import pipeline
import time
import os

# Base URL of the FastAPI vector-search service
VECTOR_API_URL = os.getenv('API_URL')

translator = Translator()

# Multilingual sentiment model (runs on GPU if available)
sentiment_model = pipeline(
    'sentiment-analysis',
    model='cardiffnlp/twitter-xlm-roberta-base-sentiment',
    tokenizer='cardiffnlp/twitter-xlm-roberta-base-sentiment',
    device=0 if torch.cuda.is_available() else -1
)

# Zero-shot classifier used to decide whether a comment is a question
classifier = pipeline(
    "zero-shot-classification",
    model="valhalla/distilbart-mnli-12-6",
    device=0 if torch.cuda.is_available() else -1
)


def classify_comment(text):
    # Translate the comment to English, then label it as interrogative or not
    translated_text = translator.translate(text, dest='en').text
    result = classifier(translated_text, ["interrogative", "non-interrogative"], clean_up_tokenization_spaces=True)
    top_class = result['labels'][0]
    return top_class


def retrieve_from_vdb(query):
    print(f"Sending request to the FastAPI service: {query}")
    response = requests.get(f"{VECTOR_API_URL}/search/", json={"query": query})
    if response.status_code == 200:
        results = response.json().get("results", [])
        print(f"Received {len(results)} results.")
        return results
    else:
        print(f"Search error: {response.text}")
        return []


def analyze_sentiment(comments):
    print("Starting sentiment analysis.")
    results = []
    # Process comments in batches of 50
    for i in range(0, len(comments), 50):
        batch = comments[i:i + 50]
        print(f"Analyzing comments {i} through {i + len(batch)}.")
        batch_results = sentiment_model(batch)
        results.extend(batch_results)
        time.sleep(1)  # Delay to avoid overloading
    print("Sentiment analysis finished.")
    return results
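A minimal usage sketch of these helpers, assuming API_URL points to a running FastAPI search service; the sample comments and the import path are hypothetical and not part of the commit:

# Hypothetical usage example (not in the commit); assumes the models above load
# and the vector-search service at API_URL is reachable.
from func_ai import classify_comment, retrieve_from_vdb, analyze_sentiment

comments = ["When is the next update coming?", "Great video, thanks!"]  # sample inputs

for comment in comments:
    label = classify_comment(comment)          # "interrogative" or "non-interrogative"
    if label == "interrogative":
        context = retrieve_from_vdb(comment)   # fetch supporting documents for the question
        print(comment, "->", context[:1])

print(analyze_sentiment(comments))             # one sentiment dict per comment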