import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertModel, BertPreTrainedModel, BertTokenizer, get_linear_schedule_with_warmup
from torch.optim import AdamW
import numpy as np
import time
import torch.nn as nn
import sys
import json
import os
import shutil


global EPOCHS, BATCH_SIZE_RATIO, SEQUENCE_LEN, LEARNING_RATE, TOKENIZER, MODEL_NAME
import torch.nn.functional as F
from openpyxl import load_workbook

projectnum = 2  # 1-based workbook column where the current project's metrics go

RESULT_FILE_PATH = r'./model/open.xlsx'  # Excel workbook that collects results


# NOTE(review): runs at import time -- raises if the workbook is missing
wb = load_workbook(RESULT_FILE_PATH)
sheet = wb.active
EPOCHS = 40              # default; may be overridden by CLI JSON in main()
BATCH_SIZE_RATIO = 0.3   # batch size as a fraction of the training-set size
SEQUENCE_LEN = 20        # max BERT token length per issue summary
LEARNING_RATE = 5e-4
# define device
global DEVICE  # no-op: `global` at module level has no effect
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# define files to be used
global DATA_PATH  # no-op at module level
DATA_PATH = r'./model/open/'  # folder of per-project CSVs
ROW_MAE, ROW_MMRE, ROW_PRED = 3, 4, 5  # worksheet rows for each metric

global MODEL_SAVE_PATH  # no-op at module level
MODEL_SAVE_PATH = r'./model/model/'

# project CSVs, processed in case-insensitive alphabetical order
TRAIN_TEST_FILE_PAIRS = os.listdir(DATA_PATH)
TRAIN_TEST_FILE_PAIRS.sort(key=str.lower)

OUTPUT = ''            # accumulated text report, flushed per project in main()
MODEL = None           # current BertForSequence instance (set in main())
DYNAMIC_BATCH = True   # derive BATCH_SIZE from the training-set size
BATCH_SIZE = None      # set by data_processing() when DYNAMIC_BATCH is on
WITHIN_PROJECT = None  # split-strategy flag (set to True further below)
MAE_RECORDS = []       # per-epoch MAE values (reset in train_eval_test)
MDAE_RECORDS = []      # per-epoch MdAE values (reset in train_eval_test)

def data_processing(file_pair):
    """Load one project's CSV, split it, tokenize, and build dataloaders.

    Args:
        file_pair: CSV file name inside ``DATA_PATH``.

    Returns:
        ``(file_pair, train_dataloader, val_dataloader, all_test_dataloader,
        test_file_names)`` when ``WITHIN_PROJECT`` is true; ``None``
        otherwise (no other split strategy is implemented here, and the
        function would fail earlier on undefined ``train_text``).

    Side effects:
        Sets the global ``BATCH_SIZE`` when ``DYNAMIC_BATCH`` is on.
    """
    global BATCH_SIZE, BATCH_SIZE_RATIO, DATA_PATH, WITHIN_PROJECT, DYNAMIC_BATCH

    fname = DATA_PATH + file_pair
    # The original appended the CSV to an empty frame via the private,
    # deprecated DataFrame._append; loading the frame directly is equivalent.
    train_data = prepare_dataframe(fname)

    # data split: chronological 60% train / 20% val / 20% test
    if WITHIN_PROJECT:
        (train_ex, train_text, train_labels,
         val_ex, val_text, val_labels,
         test_ex, test_text, test_labels) = within_project_split(train_data)

    # define batch size dynamically based on training length;
    # clamp to >= 1 so very small projects don't produce batch_size=0
    # (DataLoader rejects a zero batch size)
    if DYNAMIC_BATCH:
        BATCH_SIZE = max(1, int(len(train_text) * BATCH_SIZE_RATIO))

    # tokenize the summary text
    tokens_train = tokenization(train_text.tolist())
    tokens_val = tokenization(val_text.tolist())

    # Prepend the three numeric "experience" features to the token ids;
    # BertForSequence.forward slices them back apart (cols 0:3 vs 3:).
    train_seq = torch.tensor(tokens_train['input_ids'])
    train_ex = torch.tensor(np.array(train_ex))
    train_y = torch.tensor(train_labels.tolist()).type(torch.FloatTensor)
    train_seq = torch.cat((train_ex, train_seq), dim=1)
    train_dataloader = prepare_dataloader(train_seq, train_y, sampler_type='random')

    val_seq = torch.tensor(tokens_val['input_ids'])
    val_ex = torch.tensor(np.array(val_ex))
    val_y = torch.tensor(val_labels.tolist()).type(torch.FloatTensor)
    val_seq = torch.cat((val_ex, val_seq), dim=1)
    val_dataloader = prepare_dataloader(val_seq, val_y, sampler_type='sequential')

    # prepare testing datasets
    all_test_dataloader = []
    test_file_names = []
    if WITHIN_PROJECT:
        tokens_test = tokenization(test_text.tolist())
        test_seq = torch.tensor(tokens_test['input_ids'])
        test_ex = torch.tensor(np.array(test_ex))
        test_seq = torch.cat((test_ex, test_seq), dim=1)
        test_y = torch.tensor(test_labels.tolist()).type(torch.FloatTensor)
        test_dataloader = prepare_dataloader(test_seq, test_y, sampler_type='sequential')
        all_test_dataloader.append(test_dataloader)
        test_file_names.append(file_pair)
        return file_pair, train_dataloader, val_dataloader, all_test_dataloader, test_file_names


def tokenization(text_list):
    """Tokenize a list of strings with the cased BERT tokenizer.

    Sequences are truncated/padded to exactly ``SEQUENCE_LEN`` tokens.

    The tokenizer is loaded once and memoized on the function object:
    the original re-ran ``BertTokenizer.from_pretrained`` on every call,
    re-reading the vocabulary each time.
    """
    tokenizer = getattr(tokenization, "_tokenizer", None)
    if tokenizer is None:
        tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        tokenization._tokenizer = tokenizer
    return tokenizer(text_list, truncation=True, max_length=SEQUENCE_LEN, padding='max_length')


def prepare_dataframe(file_name):
    """Read a project CSV and keep only the model's input/target columns.

    Columns kept (in order): three numeric "experience" features, the
    issue summary text, and the story-point label. Missing values are
    replaced with 0.

    Args:
        file_name: path to the CSV file.

    Returns:
        A pandas DataFrame with exactly the five columns above.
    """
    data = pd.read_csv(file_name)
    order = ['Assignee_count', 'Reporter_count', 'Creator_count', 'Summary', 'Custom field (Story Points)']
    # read_csv already returns a DataFrame, so the original's extra
    # pd.DataFrame(data=...) copy was redundant
    return data[order].fillna(0)


def prepare_dataloader(seq, y, sampler_type):
    """Wrap paired tensors in a DataLoader with the requested sample order.

    Args:
        seq: 2-D input tensor (numeric features + token ids per row).
        y: label tensor aligned with ``seq``.
        sampler_type: ``'random'`` (training) or ``'sequential'`` (eval/test).

    Returns:
        A DataLoader over ``TensorDataset(seq, y)`` using the module-level
        ``BATCH_SIZE``.

    Raises:
        ValueError: for an unknown ``sampler_type`` (the original fell
        through to an ``UnboundLocalError`` instead).
    """
    global BATCH_SIZE
    tensor_dataset = TensorDataset(seq, y)
    if sampler_type == 'random':
        sampler = RandomSampler(tensor_dataset)
    elif sampler_type == 'sequential':
        sampler = SequentialSampler(tensor_dataset)
    else:
        raise ValueError(f"unknown sampler_type: {sampler_type!r}")
    return DataLoader(tensor_dataset, sampler=sampler, batch_size=BATCH_SIZE)


def within_project_split(data):
    """Split a project chronologically into 60% train / 20% val / 20% test.

    For each partition, in order, this yields: the first three (numeric
    feature) columns, the 'Summary' text, and the story-point labels —
    nine values in total.
    """
    n = len(data)
    cut_train = int(n * 0.6)  # end of the training slice
    cut_val = int(n * 0.8)    # end of the validation slice
    partitions = []
    for lo, hi in ((None, cut_train), (cut_train, cut_val), (cut_val, None)):
        partitions.append(data.iloc[slice(lo, hi), 0:3])
        partitions.append(data['Summary'][lo:hi])
        partitions.append(data['Custom field (Story Points)'][lo:hi])
    return tuple(partitions)


class BertForSequence(nn.Module):
    """Frozen BERT encoder plus a small regression head for effort estimation.

    The forward input packs three numeric "experience" features in columns
    0:3 and the BERT token ids from column 3 onward (see data_processing).
    Only the three linear layers are trained; the encoder is frozen.
    """

    def __init__(self):
        super(BertForSequence, self).__init__()
        self.bert = BertModel.from_pretrained("bert-base-cased")
        # freeze the encoder: only the regression head receives gradients
        for param in self.bert.parameters():
            param.requires_grad = False
        self.hidden1 = nn.Linear(768, 3)  # compress the [CLS] embedding to 3 dims
        self.hidden2 = nn.Linear(6, 50)   # 3 numeric features + 3 text features
        self.score = nn.Linear(50, 1)     # scalar effort estimate

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # BUG FIX: the original passed token_type_ids/attention_mask
        # positionally, but BertModel.forward's parameter order is
        # (input_ids, attention_mask, token_type_ids, ...), so the two were
        # swapped. Pass them by keyword. (`labels` is accepted but unused,
        # matching how callers invoke this model.)
        outputs_bert = self.bert(input_ids[:, 3:].long(),
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids)
        outputs = outputs_bert.last_hidden_state[:, 0, :]  # [CLS] embedding
        outputs = self.hidden1(outputs)
        # re-attach the numeric features ahead of the text features
        outputs = torch.cat((input_ids[:, 0:3], outputs), dim=1)
        outputs = torch.relu(self.hidden2(outputs.float()))
        logit = self.score(outputs)
        return logit


def train_eval_test(file_pair, train_dataloader, val_dataloader, all_test_dataloader, model, test_file_names):
    """Train `model`, validate each epoch, and score it on held-out data.

    Per epoch: one training pass (AdamW + linear-warmup schedule, gradient
    clipping at 1.0), one validation pass, then MAE/MdAE/MMRE/PRED metrics
    on every test dataloader. Predictions and a model checkpoint are written
    to disk every 5 epochs and on the last epoch; per-epoch metrics are
    streamed to stdout as JSON for WebSocket consumers; the metrics of the
    epoch with the lowest validation loss are written into the shared
    Excel workbook.

    Side effects: resets the global MAE_RECORDS/MDAE_RECORDS lists, appends
    to the global OUTPUT report string, writes CSV/checkpoint files, and
    saves the workbook at RESULT_FILE_PATH.
    """
    global LEARNING_RATE, EPOCHS, MAE_RECORDS, MDAE_RECORDS, DEVICE
    # BUG FIX: the optimizer was previously built from the global MODEL
    # instead of the `model` argument. The two alias in main(), but any
    # other caller would silently train the wrong parameters.
    optimizer = AdamW(model.parameters(), lr=LEARNING_RATE)
    # Total number of training steps is [number of batches] x [number of epochs]
    total_steps = len(train_dataloader) * EPOCHS
    # Create the learning rate scheduler
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

    min_eval_loss_epoch = [10000, 0]  # [lowest avg validation loss, its epoch]

    time_records = []  # cumulative training time after each epoch
    MAE_RECORDS = []
    MDAE_RECORDS = []
    MMRE_RECORDS = []
    PRED_RECORDS = []
    start_time = time.time()
    loss_fct = nn.L1Loss()
    for e in range(EPOCHS):
        print(f"DEBUG: 开始 Epoch {e}", flush=True)
        # ---TRAINING---
        # clean GPU memory
        torch.cuda.empty_cache()
        model.train()
        total_train_loss = 0
        for step, batch in enumerate(train_dataloader):
            b_input_ids = batch[0].to(DEVICE)
            b_labels = batch[1].to(DEVICE)
            model.zero_grad()
            result = model(b_input_ids,
                           labels=b_labels,
                           )
            # BUG FIX: result is (batch, 1) while b_labels is (batch,);
            # without the squeeze, L1Loss broadcasts the pair to
            # (batch, batch) and computes a wrong loss.
            loss = loss_fct(result.squeeze(-1), b_labels)
            total_train_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            # release per-batch tensors promptly
            del step, batch, b_input_ids, b_labels, result, loss

        avg_train_loss = total_train_loss / len(train_dataloader)
        del avg_train_loss, total_train_loss

        time_records.append(time.time() - start_time)

        # ---EVAL---
        model.eval()
        total_eval_loss = 0
        for batch in val_dataloader:
            b_input_ids = batch[0].to(DEVICE)
            b_labels = batch[1].to(DEVICE)
            model.zero_grad()
            result = model(b_input_ids,
                           labels=b_labels,
                           )
            # same shape fix as in the training loop
            loss = loss_fct(result.squeeze(-1), b_labels)
            total_eval_loss += loss.item()
            del b_input_ids, b_labels, batch, result, loss
        avg_eval_loss = total_eval_loss / len(val_dataloader)

        # remember the epoch with the lowest validation loss
        if avg_eval_loss <= min_eval_loss_epoch[0]:
            min_eval_loss_epoch[0] = avg_eval_loss
            min_eval_loss_epoch[1] = e

        del avg_eval_loss, total_eval_loss

        # ---TESTING on holdout data---
        index = 0
        for test_dataloader in all_test_dataloader:
            test_file_name = test_file_names[index]
            index += 1
            predictions = []
            true_labels = []
            for batch in test_dataloader:
                batch = tuple(t.to(DEVICE) for t in batch)
                b_input_ids, b_labels = batch
                with torch.no_grad():
                    logits = model(b_input_ids)
                predictions.append(logits.detach().cpu().numpy())
                true_labels.append(b_labels.to('cpu').numpy())

            # collect per-task (true, predicted) effort pairs
            all_predictions = []
            for i in range(len(predictions)):
                for j in range(len(predictions[i])):
                    all_predictions.append({
                        'True_Workload': true_labels[i][j],
                        'Predicted_Workload': predictions[i][j][0]
                    })
            # persist predictions and a checkpoint every 5 epochs and at the end
            if (e+1) % 5 == 0 or e == EPOCHS-1:
                folder_name = os.path.splitext(test_file_name)[0]  # file name without extension
                folder_path = f'./model/pred_results/{folder_name}'
                if not os.path.exists(folder_path):
                    os.makedirs(folder_path)
                predictions_df = pd.DataFrame(all_predictions)
                csv_file_path = f'{folder_path}/predictions_epoches{e}.csv'
                predictions_df.to_csv(csv_file_path, index=False)

                # save a model checkpoint for this epoch
                model_dir = f'./model/model/'
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                model_save_path = os.path.join(model_dir, f'{e+1}.pt')
                torch.save(model.state_dict(), model_save_path)

            # calculate error metrics
            total_distance = 0
            total_mre = 0
            m = 0  # count of predictions with MRE < 0.5 (numerator of PRED)
            distance_records = []
            total_data_point = 0
            for i in range(len(predictions)):
                total_data_point += len(predictions[i])
            for i in range(len(predictions)):
                for j in range(len(predictions[i])):
                    distance = abs(predictions[i][j] - true_labels[i][j])
                    if (true_labels[i][j] > 0):
                        mre = abs(predictions[i][j] - true_labels[i][j]) / true_labels[i][j]
                    else:
                        # shift by +1 to avoid division by zero for zero-point tasks
                        mre = (abs(predictions[i][j] - true_labels[i][j]) + 1) / (true_labels[i][j] + 1)
                    if mre < 0.5:
                        m += 1
                    total_mre += mre
                    total_distance += distance
                    distance_records.append(distance)
            MAE = total_distance / total_data_point
            MMRE = total_mre / total_data_point
            MdAE = np.median(np.array(distance_records))
            PRED = m / total_data_point
            # NOTE(review): these per-epoch lists are later indexed by epoch
            # number, which assumes exactly one test dataloader -- confirm.
            MAE_RECORDS.append(MAE)
            MDAE_RECORDS.append(MdAE)
            MMRE_RECORDS.append(MMRE)
            PRED_RECORDS.append(PRED)

            global OUTPUT
            OUTPUT += 'Epochs ' + str(e) + '\n'
            OUTPUT += 'MAE: ' + str(MAE) + '\n'
            OUTPUT += 'MdAE: ' + str(MdAE) + '\n'
            OUTPUT += 'MMRE: ' + str(MMRE) + '\n'
            OUTPUT += 'PRED: ' + str(PRED) + '\n\n'

            # emit per-epoch metrics as JSON on stdout so WebSocket
            # consumers can pick them up
            epoch_metrics = {
                'epoch': int(e+1),
                'mae': round(float(MAE), 4),
                'mmre': round(float(MMRE), 4),
                'mdae': round(float(MdAE), 4),
                'pred': round(float(PRED) * 100, 2)  # as a percentage
            }
            print("METRICS_JSON:" + json.dumps(epoch_metrics), flush=True)

    # report the metrics of the best (lowest-validation-loss) epoch
    OUTPUT += str(MAE_RECORDS[min_eval_loss_epoch[1]]) + '\n' + str(MMRE_RECORDS[min_eval_loss_epoch[1]]) + '\n' + str(
        PRED_RECORDS[min_eval_loss_epoch[1]]) + '\n'
    OUTPUT += 'training time: ' + str(time_records[min_eval_loss_epoch[1]]) + '\n'
    OUTPUT += 'Epochs: ' + str(min_eval_loss_epoch[1]) + '\n'
    global BATCH_SIZE
    OUTPUT += 'batch size: ' + str(BATCH_SIZE)
    # write the best epoch's metrics into the shared workbook; [0] unwraps
    # the length-1 arrays produced by the (batch, 1)-shaped predictions
    sheet.cell(row=ROW_MAE, column=projectnum).value = MAE_RECORDS[min_eval_loss_epoch[1]][0]
    sheet.cell(row=ROW_MMRE, column=projectnum).value = MMRE_RECORDS[min_eval_loss_epoch[1]][0]
    sheet.cell(row=ROW_PRED, column=projectnum).value = PRED_RECORDS[min_eval_loss_epoch[1]]
    wb.save(RESULT_FILE_PATH)


# Use the within-project chronological 60/20/20 split -- the only split
# strategy implemented in data_processing().
WITHIN_PROJECT = True


def clear_folders():
    """Empty every output folder, keeping the folders themselves.

    Missing folders are skipped; deletion errors are reported but do not
    abort the run.
    """
    folders = ["./model/model", "./model/pred_output", "./model/pred_results", "./model/result_bert"]

    for folder in folders:
        if not os.path.exists(folder):
            continue  # nothing to clear
        for entry in os.listdir(folder):
            file_path = os.path.join(folder, entry)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.remove(file_path)  # plain file or symlink
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)  # remove sub-folder recursively
            except Exception as e:
                # best-effort cleanup: report and keep going
                print(f"删除 {file_path} 时出错: {e}")


def main():
    """Script entry point: train and evaluate one model per project CSV.

    Reads optional hyperparameters from ``sys.argv[1]`` as a JSON string,
    clears the output folders, then for each CSV in DATA_PATH trains a
    fresh BertForSequence, writes the per-project report to a text file,
    deletes the processed CSVs, and advances the workbook column counter.
    """
    global TRAIN_TEST_FILE_PAIRS, MODEL, TOKENIZER, MODEL_NAME
    clear_folders()
    # parse hyperparameters passed in as a single JSON command-line argument
    if len(sys.argv) > 1:
        params_json = sys.argv[1]
        params_dict = json.loads(params_json)

        global EPOCHS, BATCH_SIZE_RATIO, SEQUENCE_LEN, LEARNING_RATE
        EPOCHS = int(params_dict.get("epochs", 20))  # defaults used when keys are absent
        BATCH_SIZE_RATIO = float(params_dict.get("batch_size_ratio", 0.3))
        # NOTE(review): key 'squence_len' (sic) must match the caller's JSON;
        # confirm with the caller before correcting the spelling
        SEQUENCE_LEN = int(params_dict.get("squence_len", 20))
        LEARNING_RATE = float(params_dict.get("learning_rate", 0.0005))

        print("\n=== 训练参数 ===")
        print("训练轮数:", EPOCHS)
        print("批次大小比例:", BATCH_SIZE_RATIO)
        print("序列长度:", SEQUENCE_LEN)
        print("学习率:", LEARNING_RATE)
    else:
        print("未接收到参数，使用默认值")

    for file in TRAIN_TEST_FILE_PAIRS:
        MODEL = BertForSequence()
        # local device duplicates the module-level DEVICE computed at import
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        MODEL = MODEL.to(device)
        file_pair, train_dataloader, val_dataloader, all_test_dataloader, test_file_names = data_processing(
            file_pair=file)
        train_eval_test(file_pair, train_dataloader, val_dataloader, all_test_dataloader, MODEL, test_file_names)

        del MODEL

        # remove the processed data files so they are not picked up again
        for item in os.listdir(DATA_PATH):
            item_path = os.path.join(DATA_PATH, item)
            if os.path.isfile(item_path):
                os.remove(item_path)

        torch.cuda.empty_cache()

        # flush the accumulated per-project report to a text file
        global OUTPUT
        with open(f'./model/result_bert/{file_pair[:-4]}.txt', 'w+') as f:
            f.writelines(OUTPUT)
            OUTPUT = ""

        # the next project writes into the next workbook column
        global projectnum
        projectnum += 1


# Run the pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()
