import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import string
import pandas as pd
import re


def pre_set_title(title):
    # 创建一个32维的特征向量
    feature_vector = torch.zeros(32)

    # 处理标题，编码标点符号的类型和标题的长度
    title = title[1:-1]  # 去除首尾字符
    title_len = len(title)

    feature_vector[31] = title_len  # 标题长度

    # 处理标题，编码标点符号的类型和标题的长度
    for idx, char in enumerate(title):
        prob_punctuation = "，。！？；：“”‘’（）&#8203;``【oaicite:0】``&#8203;、《》|—" + string.punctuation
        units = re.findall(r'[\u4e00-\u9fff]|\b\w+\b|\W|\d+', title, re.U)
        # 如果字符是标点符号并且在string.punctuation中
        if char in prob_punctuation:
            index = prob_punctuation.index(char) + 1  # 获取标点符号在string.punctuation中的索引
            feature_vector[idx] = index  # 在对应的标点符号位置上设置为1
    return feature_vector


# Data preprocessing
def get_features_and_labels(filename):
    """Load a CSV of titles and read counts and build training tensors.

    Rows whose ``read_count`` equals -100 (outlier sentinel) are dropped, as
    are rows with titles of 32+ characters (the feature vector only has 32
    slots).  Labels are scaled into [0, 1] by this file's maximum read count.

    NOTE(review): normalization is per-file; when several files are
    concatenated by train(), labels from different files end up on
    different scales — confirm this is intended.

    Args:
        filename: path to a GBK-encoded CSV with 'title' and 'read_count'
            columns.

    Returns:
        (X, y): X is an (N, 32) float tensor of title features; y is an
        (N, 1) float32 tensor of normalized labels.

    Raises:
        ValueError: if no usable rows remain after filtering.
    """
    with open(filename, 'r', encoding='gbk', errors='ignore') as file:
        data = pd.read_csv(file)
    data = data[data['read_count'] != -100]  # drop sentinel/outlier rows

    X = []
    y = []
    for _, row in data.iterrows():
        title = row['title']
        if len(title) >= 32:  # too long to encode into 32 slots
            continue
        X.append(pre_set_title(title))  # 1x32 feature vector
        y.append(row['read_count'])

    if not X:
        # torch.stack raises an opaque error on an empty list; fail clearly.
        raise ValueError(f"no usable rows in {filename!r} after filtering")

    X = torch.stack(X, dim=0)
    y = np.array(y, dtype=float)
    y_max = np.max(y)
    if y_max != 0:  # guard divide-by-zero when all counts are 0
        y = y / y_max  # scale labels into [0, 1]
    y = torch.tensor(y, dtype=torch.float32).view(-1, 1)

    return X, y


# Feedforward neural network model definition
class FeedforwardNN(nn.Module):
    """Two-layer fully connected regression network.

    Architecture: Linear(input -> hidden) -> ReLU -> Linear(hidden -> output).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, output_size)."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)


def train(dataset_size):
    """Train the feedforward regressor on files "1.csv" ... "{dataset_size}.csv".

    Pipeline: load per-file features/labels, PCA-reduce the 32-dim title
    features to 5 components, standardize, then fit a small MLP with
    full-batch Adam on a 70/30 train/test split.

    Args:
        dataset_size: number of CSV files to load (named "1.csv", "2.csv", ...).

    Returns:
        (model, pca, scaler): the trained network plus the fitted PCA and
        StandardScaler needed to preprocess future inputs the same way.
    """
    X = []
    y = []
    for i in range(dataset_size):
        feature, label = get_features_and_labels(f"{i + 1}.csv")
        X.append(feature)
        y.append(label)
    X = torch.cat(X, dim=0)
    y = torch.cat(y, dim=0)
    print(X.shape)
    print(y.shape)

    # Reduce the 32-dim features to 5 principal components.
    pca = PCA(n_components=5)
    X = pca.fit_transform(X)

    # Standardize each component to zero mean / unit variance.
    # (PCA output is already 2-D, so no reshape is needed.)
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    X = torch.tensor(X, dtype=torch.float32)

    # Split into train and test sets.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

    # Hyperparameters.
    input_size = X_train.shape[1]
    hidden_size = 64  # adjust hidden width as needed
    output_size = 1  # single regression target

    model = FeedforwardNN(input_size, hidden_size, output_size)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    # Full-batch training.  The split tensors are already float32, so they
    # are used directly instead of being re-wrapped with
    # torch.tensor(x.clone().detach(), ...) on every epoch (which is
    # loop-invariant work and triggers a PyTorch copy-construct warning).
    inputs = X_train
    labels = y_train

    num_epochs = 10
    model.train()
    for epoch in range(num_epochs):
        # Forward pass.
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

    # Evaluate on the held-out split.
    model.eval()
    with torch.no_grad():
        test_outputs = model(X_test)
        test_loss = criterion(test_outputs, y_test)
        print(f'Test Loss: {test_loss.item():.4f}')

    return model, pca, scaler