import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
# Load the raw datasets (hard-coded paths).
# NOTE(review): train.csv is read with sep='\t' but test_new.csv with the
# default ',' separator — confirm the two files really use different delimiters.
train=pd.read_csv('/data/sfq/oco/train.csv',sep='\t')
test=pd.read_csv('/data/sfq/oco/test_new.csv')

import jieba
import re
# Pre-compiled pattern matching contiguous runs of CJK Unified Ideographs.
# Compiled once at module load instead of on every call.
_CHINESE_RE = re.compile("[\u4e00-\u9fa5]+")

# Lazily-loaded stopword set. The original re-opened and re-read the
# stopword file on EVERY call (once per dataset row); cache it instead.
_STOPWORDS = None


def _get_stopwords():
    """Load the stopword list from disk once and cache it for later calls."""
    global _STOPWORDS
    if _STOPWORDS is None:
        with open('/data/sfq/oco/stopwords.txt', 'r', encoding='utf-8') as f:
            _STOPWORDS = {line.strip() for line in f}
    return _STOPWORDS


def extractChinese(s):
    """Extract the Chinese characters from *s*, segment them with jieba,
    drop stopwords, and return the remaining tokens joined by spaces.

    Returns an empty string when *s* contains no Chinese characters or
    every token is a stopword.
    """
    chinese_text = "".join(_CHINESE_RE.findall(s))
    words = jieba.cut(chinese_text)
    stopwords = _get_stopwords()
    filtered_words = [word for word in words if word not in stopwords]
    return " ".join(filtered_words)

# Preprocess: clean and tokenize every comment in the train and test sets.
label = train['label']
# Iterate the Series values directly instead of the original
# `for i in range(len(...))` + `train['comment'][i]` pattern, which is
# label-based indexing and breaks on any non-default DataFrame index.
train_data = [extractChinese(c) for c in train['comment']]
test_data = [extractChinese(c) for c in test['comment']]

# Fit TF-IDF on the union of both sets so they share one vocabulary.
combined_data = train_data + test_data
# 初始化 TfidfVectorizer
# Configure the TF-IDF vectorizer over word 1-3 grams.
tfidf = TfidfVectorizer(
    min_df=1,                 # minimum document frequency for a term
    max_features=150000,      # cap on vocabulary size
    strip_accents='unicode',
    analyzer='word',
    token_pattern=r'\w{1,}',  # any run of word characters is a token
    ngram_range=(1, 3),
    use_idf=True,
    smooth_idf=True,
    sublinear_tf=True,
    stop_words=None
)

# Vectorize all comments at once, then slice back into train/test parts.
tfidf_matrix = tfidf.fit_transform(combined_data)
n_train = len(train_data)
train_tfidf = tfidf_matrix[:n_train]
test_tfidf = tfidf_matrix[n_train:]

# Dense copies of the sparse matrices.
# NOTE(review): at up to 150k features this can be very large in memory.
train_tfidf_array = train_tfidf.toarray()
test_tfidf_array = test_tfidf.toarray()

# Report the resulting feature-matrix shapes.
print("Train TFIDF shape:", train_tfidf_array.shape)
print("Test TFIDF shape:", test_tfidf_array.shape)

import random
from sklearn.svm import LinearSVC

# Train a linear SVM on the full training set.
# The fitted model and the submission frame get their own names here:
# the original rebound `svm` twice, shadowing the `from sklearn import svm`
# module imported at the top of the file.
clf = LinearSVC(loss='squared_hinge', dual=True, tol=0.0001,
                C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1,
                class_weight='balanced', verbose=0, random_state=None, max_iter=1000)
model = clf.fit(train_tfidf, label)
# Predict labels for the test set.
svm_pre = model.predict(test_tfidf)
# Build the submission frame: id + predicted label. The prediction column
# is kept as 'comment' to preserve the expected output-file format.
submission = pd.DataFrame(data=svm_pre, columns=['comment'])
submission['id'] = test.id
submission = submission[['id', 'comment']]


def print_random_predictions(test_data, predictions, num_samples=2):
    """Print a few randomly chosen test comments with their predicted labels."""
    # Guard against tiny test sets: random.sample raises ValueError when the
    # population is smaller than the sample size.
    k = min(num_samples, len(test_data))
    indices = random.sample(range(len(test_data)), k)
    for i in indices:
        print(f"评论ID: {test['id'].iloc[i]}")
        print(f"评论内容: \"{test_data[i]}\"")
        print(f"预测标签: {'与食品安全相关' if predictions[i] == 1 else '与食品安全无关'}")
        print()


# Print two random predictions, then persist the submission file.
print_random_predictions(test_data, svm_pre)
submission.to_csv('svm.csv', index=False)

from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split

# Hold out 20% of the training data for validation.
X_train, X_val, y_train, y_val = train_test_split(train_tfidf_array, label, test_size=0.2, random_state=42)

# Re-train on the reduced training split. The fitted model is named `model`
# so it does not shadow the `svm` module imported at the top of the file.
clf = LinearSVC(loss='squared_hinge', dual=True, tol=0.0001,
                C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1,
                class_weight='balanced', verbose=0, random_state=None, max_iter=1000)
model = clf.fit(X_train, y_train)

# Predict on the held-out validation split.
val_predictions = model.predict(X_val)

# Weighted-average metrics (weights each class by its support, which
# accounts for class imbalance).
precision = precision_score(y_val, val_predictions, average='weighted')
recall = recall_score(y_val, val_predictions, average='weighted')
f1 = f1_score(y_val, val_predictions, average='weighted')

# BUG FIX: the original labelled the precision value "准确率" (accuracy);
# the metric computed is precision_score, i.e. "精确率".
print(f"精确率: {precision:.4f}")
print(f"召回率: {recall:.4f}")
print(f"F1 分数: {f1:.4f}")