Spaces:
Paused
Paused
Update func_ai.py
Browse files- func_ai.py +39 -56
func_ai.py
CHANGED
@@ -1,91 +1,74 @@
|
|
1 |
-
# func_ai.py
|
2 |
|
3 |
import requests
|
4 |
import torch
|
|
|
5 |
from transformers import pipeline
|
6 |
from deep_translator import GoogleTranslator
|
7 |
import time
|
8 |
-
import os
|
9 |
-
from datetime import datetime
|
10 |
-
|
11 |
VECTOR_API_URL = os.getenv('API_URL')
|
12 |
|
13 |
-
|
14 |
-
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
15 |
-
print(f"[{timestamp}] {message}")
|
16 |
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
tokenizer='cardiffnlp/twitter-xlm-roberta-base-sentiment',
|
24 |
-
device=0 if torch.cuda.is_available() else -1
|
25 |
-
)
|
26 |
-
|
27 |
-
classifier = pipeline(
|
28 |
-
"zero-shot-classification",
|
29 |
-
model="valhalla/distilbart-mnli-12-6",
|
30 |
-
device=0 if torch.cuda.is_available() else -1
|
31 |
-
)
|
32 |
-
return sentiment_model, classifier
|
33 |
|
34 |
-
|
|
|
|
|
|
|
|
|
35 |
|
36 |
def classify_comment(text):
|
37 |
if not text:
|
38 |
-
|
39 |
return "non-interrogative"
|
40 |
-
|
41 |
try:
|
42 |
translated_text = GoogleTranslator(source='auto', target="en").translate(text)
|
43 |
-
|
44 |
except Exception as e:
|
45 |
-
|
46 |
return "non-interrogative"
|
47 |
if not translated_text:
|
48 |
-
|
49 |
return "non-interrogative"
|
50 |
|
51 |
try:
|
52 |
result = classifier(translated_text, ["interrogative", "non-interrogative"], clean_up_tokenization_spaces=True)
|
53 |
-
|
54 |
except Exception as e:
|
55 |
-
|
56 |
return "non-interrogative"
|
57 |
|
58 |
top_class = result['labels'][0]
|
59 |
-
|
60 |
return top_class
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
def analyze_sentiment(comments):
|
63 |
-
|
64 |
results = []
|
65 |
for i in range(0, len(comments), 50):
|
66 |
batch = comments[i:i + 50]
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
results.extend(batch_results)
|
72 |
-
except Exception as e:
|
73 |
-
log_message(f"Ошибка при анализе настроений батча: {e}")
|
74 |
time.sleep(1) # Задержка для предотвращения перегрузки
|
75 |
-
|
76 |
return results
|
77 |
-
|
78 |
-
def retrieve_from_vdb(query):
|
79 |
-
log_message(f"Отправка запроса к FastAPI сервису: {query}")
|
80 |
-
try:
|
81 |
-
response = requests.post(f"{VECTOR_API_URL}/search/", json={"query": query})
|
82 |
-
if response.status_code == 200:
|
83 |
-
results = response.json().get("results", [])
|
84 |
-
log_message(f"Получено {len(results)} результатов: {results}")
|
85 |
-
return results
|
86 |
-
else:
|
87 |
-
log_message(f"Ошибка при поиске: {response.text}")
|
88 |
-
return []
|
89 |
-
except Exception as e:
|
90 |
-
log_message(f"Ошибка при запросе к векторной базе данных: {e}")
|
91 |
-
return []
|
|
|
|
|
1 |
|
2 |
import requests
import torch
# from googletrans import Translator
from transformers import pipeline
from deep_translator import GoogleTranslator
import time
import os

# Base URL of the FastAPI vector-search service, read from the environment.
# NOTE(review): if API_URL is unset this is None and retrieve_from_vdb will
# build a broken URL — confirm the variable is always set in deployment.
VECTOR_API_URL = os.getenv('API_URL')

# translator = Translator()

# Multilingual sentiment pipeline used by analyze_sentiment.
# Runs on the first GPU when CUDA is available, otherwise on CPU (device=-1).
sentiment_model = pipeline(
    'sentiment-analysis',
    model='cardiffnlp/twitter-xlm-roberta-base-sentiment',
    tokenizer='cardiffnlp/twitter-xlm-roberta-base-sentiment',
    device=0 if torch.cuda.is_available() else -1
)

# Zero-shot classifier used by classify_comment to decide whether a comment
# is a question ("interrogative") or not.
classifier = pipeline(
    "zero-shot-classification",
    model="valhalla/distilbart-mnli-12-6",
    device=0 if torch.cuda.is_available() else -1
)
|
25 |
|
26 |
def classify_comment(text):
    """Decide whether *text* is phrased as a question.

    The comment is first machine-translated to English, then handed to the
    module-level zero-shot classifier. Any failure along the way (empty
    input, translation error, classifier error) falls back to the safe
    default label.

    Returns:
        str: "interrogative" or "non-interrogative".
    """
    fallback = "non-interrogative"

    # Guard: nothing to classify.
    if not text:
        print("Received empty text for classification.")
        return fallback

    print(f"Classifying comment: {text}")

    # Normalize to English so the classifier sees one language.
    try:
        english = GoogleTranslator(source='auto', target="en").translate(text)
        print(f"Translated text: {english}")
    except Exception as err:
        print(f"Translation failed: {err}")
        return fallback

    if not english:
        print("Translation returned empty text.")
        return fallback

    try:
        prediction = classifier(
            english,
            ["interrogative", "non-interrogative"],
            clean_up_tokenization_spaces=True,
        )
        print(f"Classification result: {prediction}")
    except Exception as err:
        print(f"Classification failed: {err}")
        return fallback

    label = prediction['labels'][0]
    print(f"Top class: {label}")
    return label
|
51 |
|
52 |
+
def retrieve_from_vdb(query):
    """Query the FastAPI vector-search service and return its result list.

    POSTs *query* to ``{VECTOR_API_URL}/search/`` and returns the
    ``"results"`` list from the JSON response. Returns an empty list on
    any HTTP error, network failure, or timeout instead of raising, so
    callers can treat "no results" and "service unavailable" uniformly.

    Args:
        query: search text forwarded to the vector database service.

    Returns:
        list: search results, or [] on any failure.
    """
    print(f"Отправка запроса к FastAPI сервису: {query}")
    try:
        # Explicit timeout so a hung service cannot block the caller forever.
        response = requests.post(
            f"{VECTOR_API_URL}/search/",
            json={"query": query},
            timeout=30,
        )
    except requests.exceptions.RequestException as e:
        # Network error / timeout: log and degrade to an empty result set.
        print(f"Ошибка при запросе к векторной базе данных: {e}")
        return []
    if response.status_code == 200:
        results = response.json().get("results", [])
        print(f"Получено {len(results)} результатов: {results}")
        return results
    print(f"Ошибка при поиске: {response.text}")
    return []
|
62 |
+
|
63 |
def analyze_sentiment(comments, batch_size=50):
    """Run the module-level sentiment pipeline over *comments*.

    Comments are processed in batches to keep memory bounded, with a short
    pause between batches to avoid overloading the model. A batch that
    fails is logged and skipped rather than aborting the whole run.

    Args:
        comments: list of comment strings to analyze.
        batch_size: number of comments per model call (default 50).

    Returns:
        list: one sentiment-pipeline result per successfully analyzed
        comment, in input order.
    """
    print("Начинаем анализ настроений.")
    results = []
    for i in range(0, len(comments), batch_size):
        batch = comments[i:i + batch_size]
        print(f"Анализируем батч с {i} по {i + len(batch)} комментарий: {batch}")
        try:
            batch_results = sentiment_model(batch)
        except Exception as e:
            # Skip a failing batch instead of losing the whole run.
            print(f"Ошибка при анализе настроений батча: {e}")
        else:
            print(f"Результаты батча: {batch_results}")
            results.extend(batch_results)
        time.sleep(1)  # Задержка для предотвращения перегрузки
    # BUG FIX: was a plain string, so the literal text "{results}" was printed.
    print(f"Анализ настроений завершен. Общие результаты: {results}")
    return results
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|