#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
训练BERT分类器
"""

import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from sklearn.model_selection import train_test_split
import numpy as np
import os
import argparse

class TransactionDataset(Dataset):
    """PyTorch dataset wrapping raw transaction texts and integer labels.

    Each item is tokenized lazily with a BERT tokenizer, padded and
    truncated to a fixed maximum length.
    """

    def __init__(self, texts, labels, tokenizer, max_length=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Coerce to str in case a non-string value (e.g. NaN) slipped in.
        sample = str(self.texts[idx])

        encoded = self.tokenizer.encode_plus(
            sample,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )

        # encode_plus returns (1, max_length) tensors; flatten to 1-D so the
        # DataLoader can stack items into (batch, max_length) batches.
        return {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'label': torch.tensor(self.labels[idx], dtype=torch.long),
        }


class DataAugmentor:
    """Produces text variants by substituting words with synonyms."""

    def augment(self, text):
        """Return *text* with each known word replaced by its synonym.

        Substitutions are applied in a fixed order so results are
        deterministic.
        """
        # Ordered (original, synonym) pairs applied to every input.
        substitutions = (('在', '于'), ('消费', '支出'))
        for original, synonym in substitutions:
            text = text.replace(original, synonym)
        return text


def train_model(use_gpu=False):
    """Fine-tune a Chinese BERT model as a transaction-category classifier.

    Loads the locally cached ``bert-base-chinese`` checkpoint, trains it on
    a small built-in labelled dataset (doubled via synonym augmentation),
    and saves the fine-tuned model and tokenizer under
    ``../model_cache/bert_transaction_classifier``.

    Args:
        use_gpu: Train on CUDA when available; falls back to CPU otherwise.
    """
    # Route Hugging Face hub traffic through a mirror endpoint.
    os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

    # BUGFIX: the existence check and the actual tokenizer/model load
    # previously used two different directories ('../model_cache/...' vs
    # './model_cache/...'), making the check useless. A single path is now
    # used for the check, the tokenizer, and the model.
    model_path = '../model_cache/bert-base-chinese'
    if not os.path.exists(model_path):
        print("基础模型不存在，请运行 'python create_model.py' 下载基础模型。")
        return

    # Classification labels and their integer ids.
    labels = ['餐饮', '交通', '购物', '娱乐', '医疗', '教育', '住房', '其他']
    label_map = {label: i for i, label in enumerate(labels)}

    # Select the training device.
    device = torch.device('cuda' if use_gpu and torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Built-in labelled training data: (text, category) pairs.
    data_raw = [
        # Dining
        ("在肯德基用餐", "餐饮"),
        ("星巴克咖啡", "餐饮"),
        ("麦当劳用餐", "餐饮"),
        ("海底捞聚餐", "餐饮"),
        ("在麦当劳吃饭", "餐饮"),
        ("肯德基外卖", "餐饮"),
        ("必胜客聚餐", "餐饮"),
        ("火锅聚餐", "餐饮"),
        ("烧烤晚餐", "餐饮"),
        ("日式料理", "餐饮"),
        ("韩式烤肉", "餐饮"),
        ("西餐厅用餐", "餐饮"),
        ("快餐消费", "餐饮"),
        ("外卖订餐", "餐饮"),
        ("奶茶饮品", "餐饮"),
        ("早餐", "餐饮"),
        ("午餐", "餐饮"),
        ("晚餐", "餐饮"),
        ("夜宵", "餐饮"),
        ("零食购买", "餐饮"),

        # Transport
        ("乘坐地铁", "交通"),
        ("滴滴出行", "交通"),
        ("公交出行", "交通"),
        ("高铁出行", "交通"),
        ("出租车费用", "交通"),
        ("共享单车", "交通"),
        ("加油费用", "交通"),
        ("停车费", "交通"),
        ("高速公路费", "交通"),
        ("飞机票", "交通"),
        ("火车票", "交通"),
        ("网约车", "交通"),
        ("地铁月票", "交通"),
        ("公交卡充值", "交通"),
        ("车辆保养", "交通"),
        ("洗车费用", "交通"),
        ("过路费", "交通"),
        ("打车软件", "交通"),
        ("共享单车月卡", "交通"),
        ("电动车充电", "交通"),

        # Shopping
        ("在淘宝购物", "购物"),
        ("京东购物", "购物"),
        ("超市购物", "购物"),
        ("商场购物", "购物"),
        ("购买衣服", "购物"),
        ("购买鞋子", "购物"),
        ("购买化妆品", "购物"),
        ("购买手机", "购物"),
        ("购买电脑", "购物"),
        ("购买家电", "购物"),
        ("购买书籍", "购物"),
        ("购买文具", "购物"),
        ("购买礼品", "购物"),
        ("购买日用品", "购物"),
        ("购买家具", "购物"),
        ("购买数码产品", "购物"),
        ("购买运动装备", "购物"),
        ("购买母婴用品", "购物"),
        ("购买宠物用品", "购物"),
        ("购买保健品", "购物"),

        # Entertainment
        ("看电影", "娱乐"),
        ("KTV唱歌", "娱乐"),
        ("游乐场游玩", "娱乐"),
        ("游戏充值", "娱乐"),
        ("音乐会门票", "娱乐"),
        ("演唱会门票", "娱乐"),
        ("游乐园门票", "娱乐"),
        ("健身房会员", "娱乐"),
        ("游泳馆消费", "娱乐"),
        ("棋牌室消费", "娱乐"),
        ("网吧上网", "娱乐"),
        ("酒吧消费", "娱乐"),
        ("温泉度假", "娱乐"),
        ("旅游费用", "娱乐"),
        ("摄影费用", "娱乐"),
        ("SPA按摩", "娱乐"),
        ("美容美发", "娱乐"),
        ("健身教练", "娱乐"),
        ("舞蹈课程", "娱乐"),
        ("瑜伽课程", "娱乐"),

        # Medical
        ("医院挂号", "医疗"),
        ("体检费用", "医疗"),
        ("药品购买", "医疗"),
        ("牙科治疗", "医疗"),
        ("眼科检查", "医疗"),
        ("疫苗接种", "医疗"),
        ("中医诊疗", "医疗"),
        ("西药费用", "医疗"),
        ("中药费用", "医疗"),
        ("医疗器械", "医疗"),
        ("保健品购买", "医疗"),
        ("康复治疗", "医疗"),
        ("心理咨询", "医疗"),
        ("核酸检测", "医疗"),
        ("疫苗费用", "医疗"),
        ("理疗费用", "医疗"),
        ("透析费用", "医疗"),
        ("住院费用", "医疗"),
        ("手术费用", "医疗"),
        ("门诊费用", "医疗"),

        # Education
        ("购买教材", "教育"),
        ("在线课程", "教育"),
        ("培训费用", "教育"),
        ("学费缴纳", "教育"),
        ("购买书籍", "教育"),
        ("购买文具", "教育"),
        ("考试报名费", "教育"),
        ("培训班费用", "教育"),
        ("网课费用", "教育"),
        ("学习资料", "教育"),
        ("教育软件", "教育"),
        ("培训教材", "教育"),
        ("学位费用", "教育"),
        ("留学费用", "教育"),
        ("课外辅导", "教育"),
        ("兴趣班费用", "教育"),
        ("技能学习", "教育"),
        ("职业培训", "教育"),
        ("学历教育", "教育"),
        ("成人教育", "教育"),

        # Housing
        ("房租", "住房"),
        ("水电费", "住房"),
        ("物业费", "住房"),
        ("装修费用", "住房"),
        ("房屋维修", "住房"),
        ("宽带费用", "住房"),
        ("燃气费", "住房"),
        ("取暖费", "住房"),
        ("房屋中介费", "住房"),
        ("房屋保险", "住房"),
        ("家具购买", "住房"),
        ("家电购买", "住房"),
        ("租房押金", "住房"),
        ("房贷还款", "住房"),
        ("房屋税费", "住房"),
        ("小区停车费", "住房"),
        ("房屋清洁", "住房"),
        ("绿化费用", "住房"),
        ("电梯维护费", "住房"),
        ("房屋租赁", "住房"),

        # Other
        ("其他消费", "其他"),
        ("意外支出", "其他"),
        ("礼金支出", "其他"),
        ("投资理财", "其他"),
        ("保险费用", "其他"),
        ("慈善捐赠", "其他"),
        ("罚款费用", "其他"),
        ("手续费", "其他"),
        ("利息支出", "其他"),
        ("退款", "其他"),
        ("奖金收入", "其他"),
        ("工资收入", "其他"),
        ("兼职收入", "其他"),
        ("投资收益", "其他"),
        ("礼金收入", "其他"),
        ("退款收入", "其他"),
        ("政府补贴", "其他"),
        ("奖金", "其他"),
        ("报销费用", "其他"),
        ("其他收入", "其他"),
    ]

    # Double the training set with synonym-substituted copies.
    # NOTE(review): some augmented texts are identical to the originals
    # (no replaceable words), so the dataset contains duplicates.
    augmentor = DataAugmentor()
    augmented_data = [(augmentor.augment(text), label) for text, label in data_raw]
    data = data_raw + augmented_data

    # Split (text, category) pairs into parallel text / integer-id lists.
    texts = [text for text, _ in data]
    label_ids = [label_map[name] for _, name in data]

    # Load the tokenizer from the SAME verified path as the model.
    tokenizer = BertTokenizer.from_pretrained(model_path)

    # Build the dataset and a shuffling mini-batch loader.
    dataset = TransactionDataset(texts, label_ids, tokenizer)
    dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

    # Load the base model with a fresh classification head sized to our
    # label set (ignore_mismatched_sizes allows replacing the head).
    model = BertForSequenceClassification.from_pretrained(
        model_path, num_labels=len(labels), ignore_mismatched_sizes=True)
    model = model.to(device)

    # NOTE(review): transformers.AdamW is deprecated/removed in recent
    # transformers releases; torch.optim.AdamW is the drop-in replacement —
    # confirm against the pinned transformers version before switching.
    optimizer = AdamW(model.parameters(), lr=2e-5)

    # Standard fine-tuning loop: 5 epochs over the full dataset.
    model.train()
    for epoch in range(5):
        total_loss = 0.0
        for batch in dataloader:
            optimizer.zero_grad()
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            # Renamed from `labels` to avoid shadowing the label-name list.
            batch_labels = batch['label'].to(device)

            # Passing labels= makes the model compute cross-entropy loss.
            outputs = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            labels=batch_labels)
            loss = outputs.loss
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        avg_loss = total_loss / len(dataloader)
        print(f"Epoch: {epoch+1}, Average Loss: {avg_loss}")

    # Persist the fine-tuned model and tokenizer together.
    os.makedirs('../model_cache/bert_transaction_classifier', exist_ok=True)
    model.save_pretrained('../model_cache/bert_transaction_classifier')
    tokenizer.save_pretrained('../model_cache/bert_transaction_classifier')
    print("模型已保存到 ../model_cache/bert_transaction_classifier")


if __name__ == "__main__":
    # CLI entry point: parse flags, then kick off training.
    cli = argparse.ArgumentParser(description='训练BERT分类器')
    cli.add_argument('--use-gpu', action='store_true', help='使用GPU进行训练')
    options = cli.parse_args()
    train_model(use_gpu=options.use_gpu)