import os

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import nltk
import re
# Download NLTK tokenizer ('punkt_tab') and stop-word resources
nltk.download('punkt_tab')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
import shutil

# Text preprocessing: normalize one raw document into a cleaned token string.
def preprocess_text(text):
    """Clean *text* for TF-IDF vectorization.

    Keeps word characters, whitespace and CJK characters, lowercases,
    strips ASCII punctuation, tokenizes with NLTK, and removes English
    stop words.  Returns the surviving tokens joined by single spaces.
    """
    # Drop everything except word chars, whitespace and CJK (\u4e00-\u9fa5).
    text = re.sub(r'[^\w\s\u4e00-\u9fa5]', '', text)
    # Lowercase before tokenization so stop-word matching works.
    text = text.lower()
    # Strip any remaining ASCII punctuation in one C-level pass.
    text = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(text)
    # PERF FIX: build the stop-word set once.  The original called
    # stopwords.words('english') (which returns a list) inside the
    # comprehension condition, i.e. once per token, with O(n) membership.
    stop_words = set(stopwords.words('english'))
    tokens = [word for word in tokens if word not in stop_words]
    return ' '.join(tokens)

# Load the labeled training corpus.
def load_data(file_path):
    """Parse a labeled corpus where each line is '<label> +++$+++ <text>'.

    Lines that do not split into exactly two fields are skipped.  The text
    field is run through preprocess_text before being collected.

    Returns:
        (texts, labels): parallel lists of cleaned strings and int labels.
    """
    texts, labels = [], []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            fields = raw_line.strip().split(' +++$+++ ')
            if len(fields) != 2:
                continue  # malformed line — ignore it
            labels.append(int(fields[0]))
            texts.append(preprocess_text(fields[1]))
    return texts, labels

# Load the unlabeled corpus: one document per line.
def load_unlabeled_data(file_path):
    """Return a list of non-empty, whitespace-stripped lines from *file_path*."""
    with open(file_path, 'r', encoding='utf-8') as fh:
        # Strip every line; keep only the ones that are non-empty afterwards.
        return [stripped for stripped in (raw.strip() for raw in fh) if stripped]

# Load the test set (CSV-like lines: '<id>,<text>').
def load_test_data(file_path):
    """Read test samples from *file_path*.

    BUG FIX: the original ignored *file_path* and hard-coded
    'data/test.txt' inside open().

    Blank lines are skipped.  Each remaining line is split exactly once on
    the first comma to discard the leading id (so commas inside the text
    are preserved); characters outside \\w, \\s and CJK are removed and the
    result is stripped of surrounding whitespace.

    Returns:
        list[str]: cleaned text for each non-empty line.
    """
    texts = []
    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
        for line in f:
            line = line.strip()
            if line:
                # Split only once: drop the id, keep the rest verbatim.
                _, text = line.split(',', 1)
                text = re.sub(r'[^\w\s\u4e00-\u9fa5]', '', text)
                texts.append(text.strip())
    return texts

# Load and preprocess the labeled training data.
file_path = 'data/train.txt'  # replace with your training-file path
print("load train data………………")
texts, labels = load_data(file_path)
 
# Collect results into a DataFrame (optional, but convenient downstream).
data = pd.DataFrame({'text': texts, 'label': labels})
print("data columns: ", data.columns.tolist())
# Feature extraction: TF-IDF over the 5000 highest-frequency terms.
vectorizer = TfidfVectorizer(max_features=5000)
X_train = vectorizer.fit_transform(data['text']).toarray()
print("X_train: ", len(X_train))
y_train = data['label'].values
print("y_train: ", len(y_train))

# # Split off a validation set (currently disabled).
# X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

# Build the logistic-regression classifier and fit it on the training set.
model = LogisticRegression()
model.fit(X_train, y_train)

# --- Self-training: pseudo-label the unlabeled data and retrain. ---

# Read the unlabeled corpus and wrap it in a DataFrame.
unlabeled_data = load_unlabeled_data('data/nolabel.txt')  # make sure the path is correct
unlabeled_data = pd.DataFrame({'text': unlabeled_data})

# BUG FIX: transform the 'text' column, not the DataFrame itself.
# Iterating a DataFrame yields its column *names*, so the original
# vectorizer.transform(unlabeled_data) vectorized the single string
# 'text' instead of the documents.
X_unlabeled = vectorizer.transform(unlabeled_data['text']).toarray()

# Predict per-class probabilities for every unlabeled document.
predicted_probs = model.predict_proba(X_unlabeled)

# Keep only pseudo-labels whose top-class probability exceeds this threshold.
# NOTE(review): for a binary problem the max probability is always >= 0.5,
# so 0.5 keeps every sample — consider a stricter value (e.g. 0.9) for
# higher-quality pseudo-labels.
confidence_threshold = 0.5

# Indices, rows and argmax labels of the high-confidence samples.
high_confidence_indices = np.where(np.max(predicted_probs, axis=1) > confidence_threshold)[0]
high_confidence_samples = unlabeled_data.iloc[high_confidence_indices]
high_confidence_labels = np.argmax(predicted_probs[high_confidence_indices], axis=1)

# Augment the training set with the pseudo-labeled samples.
new_train_texts = data['text'].tolist() + high_confidence_samples['text'].tolist()
new_train_labels = y_train.tolist() + high_confidence_labels.tolist()
print("new_train_texts: ", len(new_train_texts))
print("new_train_labels: ", len(new_train_labels))

# Re-fit the vectorizer on the augmented corpus and retrain the model on it.
new_train_encodings = vectorizer.fit_transform(new_train_texts).toarray()
model.fit(new_train_encodings, new_train_labels)

print("Finished updating the training set with high-confidence samples.")


# --- Inference on the test set and result export. ---
test_data = load_test_data('data/test.txt')

# Wrap the cleaned test texts in a DataFrame.
test_data = pd.DataFrame({'text': test_data})

# Vectorize with the (re-fitted) vectorizer and predict labels.
X_test = vectorizer.transform(test_data['text']).toarray()
test_predictions = model.predict(X_test)

# Batch directory — naming rule: YYYYMMDDXX, XX starts at 01 each day.
# Save code and predictions here whenever a significant update is made.
path = 'data/2025010201/'
# FIX: makedirs(..., exist_ok=True) avoids the check-then-create race of
# os.path.exists + os.mkdir and also creates missing parent directories.
os.makedirs(path, exist_ok=True)

# Write full predictions and the index,label submission file.
test_data.index.name = 'index'
test_data['label'] = test_predictions
test_data.to_csv(path + 'predictions.csv', index=False, encoding='utf-8')
test_data['label'].to_csv(path + 'submission.csv', index=True, encoding='utf-8')

# Archive the script alongside its predictions for reproducibility.
shutil.copy('main.py', path + 'main.py')
