import torch
import torch.nn as nn
import torch.optim as optim
import json
import math
from torch.utils.data import Dataset, DataLoader, random_split
import torch.nn.functional as F
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from collections import defaultdict
import argparse

# Path of the plain-text log file that training/testing result lines are
# written to at the end of main().  (Filename typo "detecion" kept as-is:
# changing it would change which file the program writes.)
log_file = "./detecionLog.txt"
# Dataset class definition follows.
# SeqDataset was modified to also accept pre-loaded, in-memory data directly
# (via the `data` argument) instead of only a JSON-lines file.
class SeqDataset(Dataset):
    """Token-sequence dataset with samples grouped by programming language.

    Samples can come from a JSON-lines file (``json_file``) or be supplied
    directly as an iterable of dicts (``data``).  Each sample is a dict with
    keys ``'features'`` (list of token ids), ``'label'`` (int) and
    ``'language'``.  Samples whose language is not in ``languages`` are
    dropped in both loading paths.
    """

    def __init__(self, json_file=None, data=None, max_len=512, languages=('py', 'js', 'perl', 'php')):
        # One bucket per language; the insertion order of `languages` fixes
        # the global indexing order used by __getitem__.
        self.data = {language: [] for language in languages}
        if json_file:
            # NOTE: total=81775 is only a hard-coded progress-bar estimate,
            # not a limit — every line of the file is read.
            with open(json_file, 'r', encoding='utf-8') as f, tqdm(total=81775, unit="line", desc="Loading Datasets") as pbar:
                for line in f:
                    item = json.loads(line)
                    language = item['language']
                    if language in self.data:
                        self.data[language].append({
                            'features': item['features'][:max_len],  # truncate long sequences
                            'label': item['label'],
                            'language': language,
                        })
                    pbar.update(1)
            print(f"DataSet Total {sum(len(v) for v in self.data.values())} Samples")
        elif data is not None:  # `is not None`: an empty container is still valid input
            for item in data:
                # Same language filter as the file branch; the original
                # raised KeyError here on an unknown language.
                if item['language'] in self.data:
                    self.data[item['language']].append(item)

    def __len__(self):
        # Total sample count across all language buckets.
        return sum(len(v) for v in self.data.values())

    def __getitem__(self, idx):
        # Walk the language buckets in order, offsetting idx into each one.
        for language, items in self.data.items():
            if idx < len(items):
                item = items[idx]
                return {'features': torch.tensor(item['features'], dtype=torch.long),
                        'label': torch.tensor(item['label'], dtype=torch.long),
                        'language': language}
            idx -= len(items)
        # The original silently returned None for out-of-range indices;
        # raising IndexError is the Dataset protocol and fails loudly.
        raise IndexError("SeqDataset index out of range")

    def token_to_id(self, token):
        """Map a token string to an id in [1, 13203]; '<pad>' maps to 0.

        NOTE(review): this relies on Python's str hash, which differs
        between processes unless PYTHONHASHSEED is fixed — token ids are
        NOT stable across runs.  Confirm this is acceptable for training.
        """
        if token == '<pad>':
            return 0
        return hash(token) % 13203 + 1  # +1 keeps ids clear of pad_id=0


# TextCNN model
class TextCNN(nn.Module):
    """Convolutional text classifier with one extra output head per language.

    A shared embedding + Conv2d feature extractor feeds a shared linear
    layer; its logits are then passed through a per-language Linear head
    selected by the ``language`` argument of ``forward``.
    """

    def __init__(self, vocab_size, embed_dim, num_classes, kernel_sizes=(1,), num_channels=50, dropout=0.5, languages=['py','js','perl','php']):
        super().__init__()
        # Submodule creation order is kept stable so seeded weight
        # initialisation stays reproducible.
        self.embedding = nn.Embedding(vocab_size, embed_dim)

        # One Conv2d per kernel height, each spanning the full embedding width.
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, num_channels, (height, embed_dim)) for height in kernel_sizes]
        )

        # Shared classifier over the concatenated pooled features.
        self.fc = nn.Linear(len(kernel_sizes) * num_channels, num_classes)

        self.dropout = nn.Dropout(dropout)

        # One extra linear head per supported language.
        self.language_softmaxes = nn.ModuleDict(
            {lang: nn.Linear(num_classes, num_classes) for lang in languages}
        )

    def forward(self, x, language):
        """Return per-class scores for a (batch, seq_len) batch of token ids."""
        emb = self.embedding(x).unsqueeze(1)  # (batch, 1, seq_len, embed_dim)

        pooled = []
        for conv in self.convs:
            feature_map = F.relu(conv(emb)).squeeze(3)  # (batch, channels, L')
            # Global max-pool over the remaining sequence dimension.
            pooled.append(F.max_pool1d(feature_map, feature_map.size(2)).squeeze(2))

        logits = self.fc(self.dropout(torch.cat(pooled, 1)))  # (batch, num_classes)

        # Route through the head that belongs to this language.
        return self.language_softmaxes[language](logits)


def evaluate_model(model, dataloader, device, language):
    """Evaluate ``model`` on ``dataloader`` for a single ``language``.

    The model is run with the given language head on every batch; only
    samples whose own 'language' field matches ``language`` contribute to
    the metrics (the per-language dataloaders built in main() contain only
    that language, so normally every sample matches).

    Returns:
        (accuracy, precision, recall, f1) — binary-averaged sklearn metrics.
    """
    model.eval()
    # The original also accumulated unfiltered true/pred lists that were
    # never read; only the per-language results are kept here.
    language_results = {'true_labels': [], 'pred_labels': []}

    with torch.no_grad():
        for batch in tqdm(dataloader, desc=f"Testing language: {language}", ncols=100):
            features = batch['features'].to(device)
            labels = batch['label'].to(device)
            outputs = model(features, language)      # scores from this language's head
            _, predicted = torch.max(outputs, 1)     # argmax class per sample

            # Keep only samples that actually belong to this language.
            for lang, true_label, pred_label in zip(batch['language'],
                                                    labels.cpu().numpy(),
                                                    predicted.cpu().numpy()):
                if lang == language:
                    language_results['true_labels'].append(true_label)
                    language_results['pred_labels'].append(pred_label)

    acc = accuracy_score(language_results['true_labels'], language_results['pred_labels'])
    precision = precision_score(language_results['true_labels'], language_results['pred_labels'], average='binary')
    recall = recall_score(language_results['true_labels'], language_results['pred_labels'], average='binary')
    f1 = f1_score(language_results['true_labels'], language_results['pred_labels'], average='binary')

    return acc, precision, recall, f1


# Main training pipeline
def main(args):
    """Train the per-language-head TextCNN and report test metrics.

    ``args`` must provide: dataset_path, epoch, train_size, batch_size.
    (``spm_model`` is accepted by the CLI but unused here.)
    Writes all result lines to ``log_file`` when finished.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using Device: {device}")

    # Supported languages; each gets its own train/test split and output head.
    languages = ['py', 'js', 'perl', 'php']

    # Load the full dataset, grouped by language.
    full_dataset = SeqDataset(args.dataset_path, languages=languages)

    # Per-language train/test split, seeded so the split is reproducible.
    language_train_datasets = {}
    language_test_datasets = {}
    for language in languages:
        data = full_dataset.data[language]
        train_size = int(args.train_size * len(data))
        test_size = len(data) - train_size
        train_data, test_data = random_split(data, [train_size, test_size],
                                             generator=torch.Generator().manual_seed(42))
        language_train_datasets[language] = SeqDataset(data=train_data, languages=languages)
        language_test_datasets[language] = SeqDataset(data=test_data, languages=languages)

    # Model hyper-parameters.
    vocab_size = 13203   # must match the modulus used by SeqDataset.token_to_id
    embed_dim = 300      # embedding dimension
    num_classes = 2      # binary classification
    kernel_sizes = [3, 4, 5]
    num_channels = 100   # feature channels per kernel size
    dropout = 0.5
    text_cnn_model = TextCNN(vocab_size, embed_dim, num_classes, kernel_sizes,
                             num_channels, dropout, languages).to(device)

    optimizer = optim.Adam(text_cnn_model.parameters(), lr=0.001)

    # Training: outer loop over epochs, inner loop over languages.
    language_scores = {}
    Log = []
    for epoch in range(args.epoch):
        print(f"Start Training Epoch {epoch+1}")

        for language in languages:
            print(f"Training Language: {language}")  # fixed typo ("Lanugage")

            train_dataloader = DataLoader(language_train_datasets[language],
                                          batch_size=args.batch_size, shuffle=True)
            # (The original also built an unused test DataLoader here every
            # epoch; removed as dead work.)

            text_cnn_model.train()
            epoch_loss = 0
            for batch in tqdm(train_dataloader, desc=f"Training {language} Epoch {epoch+1}", ncols=100):
                features, labels = batch['features'].to(device), batch['label'].to(device)

                optimizer.zero_grad()
                outputs = text_cnn_model(features, language)
                loss = F.cross_entropy(outputs, labels)
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
            train_res = f"{language} Epoch {epoch+1} - Loss: {epoch_loss/len(train_dataloader):.4f}"
            Log.append(train_res)
            print(train_res)

    # After training, evaluate each language on its held-out split.
    for language in languages:
        print(f"Start Testing: {language}")  # fixed typo ("Tetsing")

        test_dataloader = DataLoader(language_test_datasets[language],
                                     batch_size=args.batch_size, shuffle=False)
        acc, precision, recall, f1 = evaluate_model(text_cnn_model, test_dataloader, device, language)
        test_res = f"{language} Accuracy: {acc:.4f} Precision: {precision:.4f} Recall: {recall:.4f} F1 Score: {f1:.4f}"
        print(test_res)
        Log.append(test_res)
        language_scores[language] = {'acc': acc, 'precision': precision, 'recall': recall, 'f1': f1}

    # Average metrics across languages.  Use args.epoch instead of the loop
    # variable `epoch`, which is undefined when args.epoch == 0.
    avg_acc = sum(scores['acc'] for scores in language_scores.values()) / len(language_scores)
    avg_precision = sum(scores['precision'] for scores in language_scores.values()) / len(language_scores)
    avg_recall = sum(scores['recall'] for scores in language_scores.values()) / len(language_scores)
    avg_f1 = sum(scores['f1'] for scores in language_scores.values()) / len(language_scores)
    avg_res = (f"Epoch {args.epoch} - Average Accuracy: {avg_acc:.4f} Average Precision: {avg_precision:.4f} "
               f"Average Recall: {avg_recall:.4f} Average F1 Score: {avg_f1:.4f}")
    Log.append(avg_res)
    print(avg_res)

    # BUG FIX: the original opened the log file in read mode ('r'), so
    # writelines() raised at the very end of training; also writelines adds
    # no newlines, mashing every entry onto one line.
    with open(log_file, 'w', encoding='utf-8') as f:
        f.write("\n".join(Log) + "\n")



if __name__ == '__main__':
    # Command-line entry point: collect hyper-parameters and launch training.
    parser = argparse.ArgumentParser(description="训练TextCNN模型")
    for flag, arg_type, help_text in (
        ("--dataset_path", str, "数据集路径"),
        ("--spm_model", str, "spm模型路径"),
        ("--epoch", int, "训练epochs"),
        ("--train_size", float, "训练集比例"),
        ("--batch_size", int, "批次大小"),
    ):
        parser.add_argument(flag, type=arg_type, required=True, help=help_text)
    main(parser.parse_args())