import torch
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from transformers import (
    BertTokenizer,
    BertForSequenceClassification,
    Trainer,
    TrainingArguments,
    DataCollatorWithPadding
)
from datasets import Dataset, DatasetDict
import pandas as pd
import logging
import requests


# Configure module-level logging (INFO and above to stderr).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class BertTextClassifier:
    """BERT sequence classifier wrapping the HuggingFace Trainer workflow.

    Loads a local pretrained BERT checkpoint, fine-tunes it on (text, label)
    pairs, evaluates on a held-out split, and predicts single texts.
    """

    def __init__(self, model_name="bert-base-chinese", num_labels=2,
                 model_path="G:\\workspace\\models\\bert_base_chinese",
                 max_length=512):
        """
        Initialize the BERT text classifier.

        :param model_name: BERT model name (informational only; weights are
            always loaded from ``model_path``, as in the original code)
        :param num_labels: number of target classes
        :param model_path: local directory containing model + tokenizer files
            (previously hard-coded; now a parameter with the same default)
        :param max_length: tokenization length shared by training and
            prediction (fixes the original 512-train / 128-predict mismatch)
        """
        self.model_name = model_name
        self.num_labels = num_labels
        self.max_length = max_length

        # Load tokenizer and model strictly from local files (no hub download).
        self.tokenizer = BertTokenizer.from_pretrained(model_path, local_files_only=True)
        self.model = BertForSequenceClassification.from_pretrained(
            model_path,
            local_files_only=True,
            num_labels=num_labels,
        )

        # Run on GPU when available.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)
        logger.info(f"使用设备: {self.device}")

    def preprocess_function(self, examples):
        """Tokenize a batch of texts into fixed-length model inputs."""
        return self.tokenizer(
            examples["text"],
            truncation=True,
            padding="max_length",
            max_length=self.max_length,
        )

    def load_and_preprocess_data(self, texts, labels, test_size=0.2, random_state=42):
        """
        Split raw samples into train/validation sets and tokenize them.

        :param texts: list of input strings
        :param labels: list of integer class labels
        :param test_size: fraction held out for validation
        :param random_state: seed for a reproducible split
        """
        train_texts, val_texts, train_labels, val_labels = train_test_split(
            texts, labels, test_size=test_size, random_state=random_state
        )

        # Wrap both splits as HuggingFace Datasets.
        self.dataset = DatasetDict({
            "train": Dataset.from_dict({"text": train_texts, "label": train_labels}),
            "validation": Dataset.from_dict({"text": val_texts, "label": val_labels}),
        })

        # Tokenize both splits in batches.
        self.tokenized_dataset = self.dataset.map(
            self.preprocess_function,
            batched=True,
        )

        # Collator that pads dynamically at batch-construction time.
        self.data_collator = DataCollatorWithPadding(tokenizer=self.tokenizer)

        logger.info(f"数据加载完成 - 训练集: {len(train_texts)}, 验证集: {len(val_texts)}")

    def compute_metrics(self, eval_pred):
        """Return accuracy for a (logits, labels) evaluation pair."""
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        # Plain numpy accuracy — numerically identical to
        # sklearn.metrics.accuracy_score for 1-D label arrays, dependency-free.
        return {
            "accuracy": float(np.mean(predictions == np.asarray(labels))),
        }

    def train(self, output_dir="./bert_classifier", epochs=3, batch_size=8, learning_rate=2e-5):
        """
        Fine-tune the model and save the best checkpoint plus tokenizer.

        :param output_dir: directory for checkpoints and the final model
        :param epochs: number of training epochs
        :param batch_size: per-device batch size (train and eval)
        :param learning_rate: optimizer learning rate
        """
        training_args = TrainingArguments(
            output_dir=output_dir,
            num_train_epochs=epochs,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            warmup_steps=500,
            weight_decay=0.01,
            logging_dir="./logs",
            logging_steps=10,
            # Evaluate/save every epoch so load_best_model_at_end can restore
            # the checkpoint with the highest accuracy.
            eval_strategy="epoch",
            save_strategy="epoch",
            load_best_model_at_end=True,
            metric_for_best_model="accuracy",
            learning_rate=learning_rate,
        )

        self.trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=self.tokenized_dataset["train"],
            eval_dataset=self.tokenized_dataset["validation"],
            compute_metrics=self.compute_metrics,
            data_collator=self.data_collator,
        )

        logger.info("开始训练...")
        self.trainer.train()

        # Persist the (best) model and its tokenizer together.
        self.model.save_pretrained(output_dir)
        self.tokenizer.save_pretrained(output_dir)
        logger.info(f"模型已保存至 {output_dir}")

    def evaluate(self):
        """Print a per-class report for the validation split; return accuracy."""
        logger.info("开始评估...")
        predictions, labels, _ = self.trainer.predict(self.tokenized_dataset["validation"])
        preds = np.argmax(predictions, axis=-1)

        print(classification_report(labels, preds))
        return accuracy_score(labels, preds)

    def predict(self, text):
        """Classify a single text and return the predicted class id."""
        inputs = self.tokenizer(
            text,
            return_tensors="pt",
            truncation=True,
            padding="max_length",
            # BUG FIX: was hard-coded to 128 while training used 512, so long
            # inputs were truncated differently at inference time.
            max_length=self.max_length,
        ).to(self.device)

        with torch.no_grad():
            outputs = self.model(**inputs)

        return outputs.logits.argmax().item()




# 请求网络数据去训练
def requestDataToTrain():
    """Fetch penalty articles from the remote API page by page and extract
    the text between '违法事实' (facts) and '处罚依据' (legal basis).

    Stops when a page returns fewer than ``page_size`` items, when the API
    reports a non-zero code, or when the HTTP request fails.
    """
    import re  # hoisted: the original re-imported `re` once per article

    page = 1
    page_size = 100
    # category: 0 = default, 1 = IT / tech-related penalties
    category = 1
    # 0 = default, 1 = penalty notices, 2 = regulatory documents
    # (renamed from `type`, which shadowed the builtin)
    article_type = 1

    url = "http://47.111.65.44/v1/article/list"
    # Capture the text between "违法事实" and "处罚依据", tolerating optional
    # colons and whitespace; compiled once instead of per article.
    pattern = re.compile(r'违法事实\s*[:：]?\s*(.*?)\s*处罚依据\s*[:：]?', re.DOTALL)

    while True:
        params = {
            "page": page,
            "page_size": page_size,
            "category": category,
            "type": article_type,
        }
        response = requests.get(url, params=params)

        # BUG FIX: the original only handled status 200 and looped forever
        # (re-requesting the same page) on any other status; also, calling
        # .json() before this check could raise on a non-JSON error body.
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            print(response.text)
            break

        responseData = response.json()
        print(responseData)
        if responseData.get("code") != 0:
            print(f"请求失败，状态码: {response.status_code}")
            print(response.text)
            break

        articles = responseData.get("data", {}).get("articles", [])
        texts = []
        labels = []
        for article in articles:
            print(f"标题: {article['title']}")
            print(f"内容: {article['content'][:200]}...")  # first 200 chars only
            print(f"分类: {article['category']}")
            match = pattern.search(article['content'])
            extracted_text = match.group(1).strip() if match else ""
            # Drop embedded newlines from the extracted span.
            texts.append(extracted_text.replace('\n', ''))
            labels.append(article['category'])
        print(texts)
        print(labels)

        # TODO: feed each page into training once a classifier is wired in:
        # categoryToTrain(classifier, texts, labels)
        page += 1
        if len(articles) < page_size:
            print(f"当前页数据量: {len(articles)}")
            print(f"总共获取的次数: {page-1}")
            print("数据获取完毕")
            break
        
def categoryToTrain(classifier:BertTextClassifier, texts,labels):
    """Run the full fine-tune/evaluate cycle on an already-built classifier."""
    # Split and tokenize the raw samples (80/20 train/validation).
    classifier.load_and_preprocess_data(texts, labels, test_size=0.2)

    # Fine-tune; shrink the batch when no GPU is present.
    use_gpu = torch.cuda.is_available()
    classifier.train(
        output_dir="./sentiment_analysis_model",
        epochs=3,
        batch_size=8 if use_gpu else 3,
        learning_rate=2e-5,
    )

    # Report held-out accuracy.
    accuracy = classifier.evaluate()
    logger.info(f"验证集准确率: {accuracy:.4f}")
    



def sentimentRequestDataToTrain():
    """Train and evaluate a binary sentiment classifier on a tiny inline dataset."""
    # Toy sentiment corpus (replace with real data in practice).
    # Label convention: 0 = negative, 1 = positive.
    texts = [
        "这部电影太精彩了，我非常喜欢！",
        "这个产品质量很差，不推荐购买。",
        "今天天气真好，心情也跟着变好了。",
        "服务态度恶劣，以后再也不会来了。",
        "这是我看过最好的一本书，强烈推荐！",
        "这个餐厅的食物太难吃了，环境也很差。",
        "这是一次很棒的体验！",
        "我对这个结果非常失望。",
        "我很满意这个服务，会再次来。",
        "这个商品的价格很合理，值得购买。",
        "我不喜欢这个服务，会拒绝再次来。",
        "这个商品的价格很不合理，不值得购买。",
    ]
    labels = [1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0]

    # Build the classifier, prepare the data, and fine-tune.
    classifier = BertTextClassifier(model_name="bert-base-chinese", num_labels=2)
    classifier.load_and_preprocess_data(texts, labels, test_size=0.2)
    classifier.train(
        output_dir="./sentiment_analysis_model",
        epochs=3,
        batch_size=8 if torch.cuda.is_available() else 3,
        learning_rate=2e-5,
    )

    # Evaluate on the held-out split.
    accuracy = classifier.evaluate()
    logger.info(f"验证集准确率: {accuracy:.4f}")

    # Smoke-test a few predictions.
    samples = [
        "这是一次很棒的体验！",
        "我对这个结果非常失望。",
        "这个餐厅的食物太难吃了，环境也很差。"
    ]
    for text in samples:
        pred = classifier.predict(text)
        print(pred)
        sentiment = "好评" if pred == 1 else "差评"
        print(f"文本: {text} -> 预测情感: {sentiment}")

# Example entry point: fetch remote data for training.
if __name__ == "__main__":
    # sentimentRequestDataToTrain()
    requestDataToTrain()
