import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.metrics import f1_score, classification_report, accuracy_score
from sklearn.pipeline import Pipeline
from time import time


class TextClassifier:
    """K-fold cross-validated text classifier: TF-IDF features + linear SVM.

    Expects tab-separated CSV files with a 'text' column and (for the
    training set) a 'label' column of non-negative integer class ids --
    TODO confirm against the actual data files.
    """

    def __init__(self, n_splits=10, random_state=7):
        """
        Args:
            n_splits: number of cross-validation folds.
            random_state: seed shared by sampling, KFold and the SVM.
        """
        self.n_splits = n_splits
        self.random_state = random_state

        # Centralised hyper-parameter configuration for both pipeline stages.
        self.config = {
            'tfidf': {
                'sublinear_tf': True,
                'strip_accents': 'unicode',
                'analyzer': 'word',
                'token_pattern': r'\w{1,}',
                'stop_words': 'english',
                'ngram_range': (1, 3),
                'max_features': 10000
            },
            'classifier': {
                'random_state': random_state,
                'max_iter': 1000  # raised iteration cap to avoid convergence warnings
            }
        }

        # Vectorizer and classifier in one pipeline so both are refitted
        # together on every fold.
        self.pipeline = Pipeline([
            ('tfidf', TfidfVectorizer(**self.config['tfidf'])),
            ('classifier', LinearSVC(**self.config['classifier']))
        ])

    def load_data(self, train_path, test_path, sample_size=None):
        """Load train/test data, optionally subsampling the training set.

        Args:
            train_path: path to the tab-separated training file.
            test_path: path to the tab-separated test file.
            sample_size: number of training rows to keep; None keeps all.

        Raises:
            RuntimeError: if either file cannot be read/parsed (the original
                exception is attached as the cause).
        """
        # Keep the try minimal: only the reads can reasonably fail here.
        try:
            self.train_df = pd.read_csv(train_path, sep='\t')
            self.test_df = pd.read_csv(test_path, sep='\t')
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"加载数据时出错: {str(e)}") from e

        # Subsample only the training set; the test set is never sampled.
        if sample_size is not None:
            print(f'原始训练集大小: {len(self.train_df)}')

            self.train_df = self.train_df.sample(
                n=min(sample_size, len(self.train_df)),  # never request more rows than exist
                random_state=self.random_state
            )

            print(f'采样后训练集大小: {len(self.train_df)}')

        # BUGFIX: report the test-set size regardless of whether sampling
        # happened (previously only printed inside the sampling branch).
        print(f'测试集大小: {len(self.test_df)}')

    def cross_validate(self):
        """Run K-fold CV with per-fold metrics, timing estimates and voting.

        Returns:
            np.ndarray of final test-set labels, the per-sample majority
            vote over all trained folds' predictions.
        """
        kf = KFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state)
        f1_scores = []
        test_predictions = []

        total_start_time = time()

        # Early-stop state: abort CV when macro-F1 has not improved for
        # `patience` consecutive folds.
        # NOTE(review): folds are independent resamples, so this only saves
        # time; it is not early stopping in the usual training-loop sense.
        best_f1 = 0
        patience = 5
        no_improve = 0

        for fold, (train_idx, valid_idx) in enumerate(kf.split(self.train_df), 1):
            fold_start_time = time()
            print(f'\nFold {fold}/{self.n_splits}')

            # Split this fold into train/validation parts.
            X_train = self.train_df.iloc[train_idx]['text']
            y_train = self.train_df.iloc[train_idx]['label']
            X_valid = self.train_df.iloc[valid_idx]['text']
            y_valid = self.train_df.iloc[valid_idx]['label']

            print(f'训练样本数: {len(X_train)}, 验证样本数: {len(X_valid)}')

            # Fit the whole pipeline (vectorizer + SVM) on this fold's data.
            print('开始训练...')
            self.pipeline.fit(X_train, y_train)
            valid_pred = self.pipeline.predict(X_valid)

            # Validation metrics for this fold.
            fold_f1 = f1_score(y_valid, valid_pred, average='macro')
            fold_accuracy = accuracy_score(y_valid, valid_pred)
            f1_scores.append(fold_f1)

            # Per-fold elapsed time.
            fold_time = time() - fold_start_time
            print(f'本折用时: {fold_time / 60:.2f} 分钟')

            # After fold 1, extrapolate the total runtime; afterwards refine
            # the remaining-time estimate with the running average.
            if fold == 1:
                estimated_total_time = fold_time * self.n_splits
                estimated_remaining_time = estimated_total_time - fold_time
                print(f'\n预估总时间: {estimated_total_time / 60:.2f} 分钟')
                print(f'预估剩余时间: {estimated_remaining_time / 60:.2f} 分钟')
            else:
                elapsed_time = time() - total_start_time
                avg_fold_time = elapsed_time / fold
                remaining_folds = self.n_splits - fold
                estimated_remaining_time = avg_fold_time * remaining_folds
                print(f'预估剩余时间: {estimated_remaining_time / 60:.2f} 分钟')

            print(f'F1 Score: {fold_f1:.4f}')
            print(f'准确率: {fold_accuracy:.4f}')
            print(f'分类报告:\n{classification_report(y_valid, valid_pred)}')

            # BUGFIX: predict the test set BEFORE the early-stop check so the
            # fold that triggers the stop (already trained and evaluated
            # above) still contributes its vote to the aggregation.
            print('预测测试集...')
            test_pred = self.pipeline.predict(self.test_df['text'])
            test_predictions.append(test_pred)

            # Early-stop bookkeeping.
            if fold_f1 > best_f1:
                best_f1 = fold_f1
                no_improve = 0
            else:
                no_improve += 1
                if no_improve >= patience:
                    print('触发早停机制')
                    break

        total_time = time() - total_start_time
        print(f'\n交叉验证完成!')
        print(f'总用时: {total_time / 60:.2f} 分钟')
        print(f'平均每折用时: {(total_time / fold) / 60:.2f} 分钟')
        print(f'Average F1 Score: {np.mean(f1_scores):.4f} ± {np.std(f1_scores):.4f}')

        # Majority vote across folds: for each test sample pick the most
        # frequent predicted label. np.bincount requires non-negative
        # integer labels -- TODO confirm label dtype in the data.
        final_predictions = np.array([np.argmax(np.bincount(row))
                                      for row in np.array(test_predictions).T])
        return final_predictions

    def save_predictions(self, predictions, template_path, output_path):
        """Write predictions into the submission template CSV.

        Args:
            predictions: sequence of labels aligned with the template rows.
            template_path: CSV providing the expected submission layout.
            output_path: destination CSV path (its directory must exist).
        """
        submission = pd.read_csv(template_path)
        submission['label'] = predictions
        submission.to_csv(output_path, index=False)
        print(f'Predictions saved to {output_path}')


def main():
    # 初始化分类器
    classifier = TextClassifier()

    try:
        # 加载数据，训练集采样10000条，测试集保持原样
        classifier.load_data(
            train_path='./data/train_set.csv',
            test_path='./data/test_a.csv',
            sample_size=200000  # 只对训练集采样200000条数据
        )

        # 执行交叉验证并获取预测结果
        predictions = classifier.cross_validate()

        # 保存预测结果
        classifier.save_predictions(
            predictions=predictions,
            template_path='./data/test_a_sample_submit.csv',
            output_path='./output/LinearSVC_submission.csv'
        )
    except Exception as e:
        print(f"错误: {str(e)}")


# Run the full pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()