import os
import sys   
import numpy as np  
import torch 
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import time
import sys  
from training.dl_frame.dl_model.my_lstm import LSTM, MLP_net, Cor_loss
from training.dl_frame.my_dataset import MyDataset_twoset
from my_tools import plot_line


class Train_process():
    """Driver for the two-phase (pre-train then fine-tune) training pipeline.

    Builds the network and device once, then exposes:
      * ``pre_train``      -- train from scratch on the pre-training split
      * ``fintune_train``  -- load the pre-trained checkpoint and fine-tune
      * ``evaluate``       -- mean validation loss over a loader
      * ``predict_future`` -- dump a prediction-vs-truth plot for a loader

    ``args`` is a hyper-parameter namespace; fields read here:
    feature_list, back_len, horizon, seq_len, batch_size, lr, log_eval,
    early_stop, pretrain_epoch, finetune_epoch, use_gpu, gpu,
    use_multi_gpu, devices, device_ids.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.device = self._acquire_device()
        self.model = self._build_model().to(self.device)

    def _build_model(self):
        """Instantiate the backbone network, optionally wrapped for multi-GPU."""
        model = MLP_net(input_size=len(self.args.feature_list),
                        back_len=self.args.back_len,
                        horizon=self.args.horizon,
                        seq_len=self.args.seq_len)
        # Alternative backbone, kept for reference:
        # model = LSTM(input_size=len(self.args.feature_list), back_len=self.args.back_len,
        #              horizon=self.args.horizon, seq_len=self.args.seq_len)
        if self.args.use_multi_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _acquire_device(self):
        """Pick the torch device and pin CUDA_VISIBLE_DEVICES to the chosen GPU(s)."""
        if self.args.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = (
                self.args.devices if self.args.use_multi_gpu else str(self.args.gpu))
            device = torch.device(f'cuda:{self.args.gpu}')
            print(f'Use GPU: cuda:{self.args.gpu}')
        else:
            device = torch.device('cpu')
            print('Use CPU')
        return device

    def _get_pretrain_data(self, X_train, X_eval, Y_train, Y_eval, times_train, times_eval):
        """Wrap the pre-training splits into train/eval DataLoaders."""
        # NOTE(review): loaders are unshuffled -- confirm this is intentional for
        # the ordered time-series windows produced by MyDataset_twoset.
        train_set = MyDataset_twoset(X_train, Y_train, self.args.back_len,
                                     self.args.horizon, self.args.seq_len, times_train)
        eval_set = MyDataset_twoset(X_eval, Y_eval, self.args.back_len,
                                    self.args.horizon, self.args.seq_len, times_eval)
        train_loader = DataLoader(train_set, batch_size=self.args.batch_size)
        eval_loader = DataLoader(eval_set, batch_size=self.args.batch_size)
        return train_loader, eval_loader

    def _get_finetune_data(self, X_ft_train, X_ft_eval, X_out, Y_ft_train, Y_ft_eval, Y_out,
                           times_train, times_eval, times_out):
        """Wrap the fine-tuning splits into train/eval/predict DataLoaders.

        Eval and predict loaders use batch_size=1 so each window is scored
        individually.
        """
        train_set = MyDataset_twoset(X_ft_train, Y_ft_train, self.args.back_len,
                                     self.args.horizon, self.args.seq_len, times_train)
        eval_set = MyDataset_twoset(X_ft_eval, Y_ft_eval, self.args.back_len,
                                    self.args.horizon, self.args.seq_len, times_eval)
        predict_set = MyDataset_twoset(X_out, Y_out, self.args.back_len,
                                       self.args.horizon, self.args.seq_len, times_out)
        train_loader = DataLoader(train_set, batch_size=self.args.batch_size)
        eval_loader = DataLoader(eval_set, batch_size=1)
        predict_loader = DataLoader(predict_set, batch_size=1)
        return train_loader, eval_loader, predict_loader

    def _select_optimizer(self):
        """Adam over all model parameters at the configured learning rate."""
        model_optim = optim.Adam(self.model.parameters(), lr=self.args.lr)
        return model_optim

    def _select_criterion(self):
        """Correlation-based loss (MSE kept as the commented-out alternative)."""
        # criterion = nn.MSELoss()
        criterion = Cor_loss()
        return criterion

    def _run_training(self, train_loader, eval_loader, predict_loader,
                      epochs, predict_every, root):
        """Shared optimisation loop used by both training phases.

        The original ``pre_train`` / ``fintune_train`` bodies were verbatim
        duplicates except for the epoch count and the prediction-plot interval;
        they now both delegate here.

        Args:
            train_loader / eval_loader / predict_loader: phase DataLoaders.
            epochs: number of epochs to run.
            predict_every: dump a prediction plot every this many epochs.
            root: directory receiving ``network.pth`` and the diagnostic plots.

        Returns:
            The trained model (``self.model``).
        """
        early_stop_epoch_cnt = 0
        best_val_loss = float("inf")
        best_state = None
        train_loss_lst = []
        val_loss_lst = []
        train_steps = len(train_loader)
        model_optim = self._select_optimizer()
        # decay the learning rate by 2% every epoch
        scheduler = torch.optim.lr_scheduler.StepLR(model_optim, 1, gamma=0.98)
        criterion = self._select_criterion()
        for epoch in range(epochs):
            self.model.train()  # (original called .train() twice; once is enough)
            log_loss = 0
            train_loss = 0
            epoch_time = time.time()
            iter_time = time.time()
            for batch_index, (batch_x, batch_y) in enumerate(train_loader):
                model_optim.zero_grad()  # (original zeroed twice in pre_train)
                x_shape = batch_x.shape  # batch_size, seq_len, codes_num, feature_name
                y_shape = batch_y.shape
                # fold the codes dimension into the batch dimension
                trainx = batch_x.view(-1, x_shape[1], x_shape[-1]).to(self.device).float()
                trainy = batch_y.view(-1, y_shape[1], y_shape[-1]).to(self.device).float()
                output = self.model(trainx)
                loss = criterion(output, trainy)
                loss.backward()
                # clip gradients to stabilise training
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
                model_optim.step()

                log_loss += loss.item()
                train_loss += loss.item()
                if (batch_index + 1) % self.args.log_eval == 0:
                    print('-' * 89)
                    print(f'| iter:{batch_index+1}/{train_steps}, | epoch:{epoch}, '
                          f'| loss:{(log_loss/self.args.log_eval):5.6f}, '
                          f'| lr:{scheduler.get_last_lr()[0]:5.6f}, '
                          f'| time_cost:{(time.time()-iter_time):5.2f}s ')
                    iter_time = time.time()
                    log_loss = 0

            train_loss_lst.append(train_loss / train_steps)

            if (epoch + 1) % predict_every == 0:
                self.predict_future(self.model, predict_loader, root, epoch)
            val_loss = self.evaluate(eval_loader, criterion, epoch)
            val_loss_lst.append(val_loss)

            print('-' * 89)
            # bug fix: the format string has 3 placeholders but the original
            # passed 4 arguments (val_loss twice)
            print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.6f} | '.format(
                epoch, (time.time() - epoch_time), val_loss))
            print('-' * 89)
            scheduler.step()

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                early_stop_epoch_cnt = 0  # bug fix: reset patience on improvement
                # bug fix: the original kept a live reference (best_model = self.model),
                # so the "best" weights re-saved on early stop were really the latest
                # ones; snapshot the tensors instead
                best_state = {k: v.detach().clone()
                              for k, v in self.model.state_dict().items()}
                torch.save(best_state, f'{root}/network.pth')
            else:
                early_stop_epoch_cnt += 1
            if early_stop_epoch_cnt > self.args.early_stop:
                torch.save(best_state, f'{root}/network.pth')  # persist best weights
                break
            plot_line(train_loss_lst, val_loss_lst, f'{root}/loss.png', 'train', 'eval')
        return self.model

    def fintune_train(self, X_ft_train, X_ft_eval, X_out, Y_ft_train, Y_ft_eval, Y_out,
                      times_train, times_eval, times_out, root, pre_root):
        """Fine-tune from the pre-trained checkpoint in ``pre_root``.

        Checkpoints/plots go to ``root``; a prediction plot is produced every
        2 epochs on the out-of-sample loader.
        """
        # map_location fixes loading a GPU-saved checkpoint in a CPU-only run
        self.model.load_state_dict(
            torch.load(f'{pre_root}/network.pth', map_location=self.device))
        train_loader, eval_loader, predict_loader = self._get_finetune_data(
            X_ft_train, X_ft_eval, X_out, Y_ft_train, Y_ft_eval, Y_out,
            times_train, times_eval, times_out)
        return self._run_training(train_loader, eval_loader, predict_loader,
                                  self.args.finetune_epoch, 2, root)

    def pre_train(self, X_train, X_eval, Y_train, Y_eval, times_train, times_eval, root):
        """Pre-train from scratch; checkpoints and plots go to ``root``.

        The eval loader doubles as the prediction loader (plotted every 10 epochs).
        """
        train_loader, eval_loader = self._get_pretrain_data(
            X_train, X_eval, Y_train, Y_eval, times_train, times_eval)
        return self._run_training(train_loader, eval_loader, eval_loader,
                                  self.args.pretrain_epoch, 10, root)

    def evaluate(self, eval_dataloader, criterion, epoch):
        """Return the mean criterion loss over ``eval_dataloader`` (no gradients).

        ``epoch`` is accepted for interface compatibility but unused.
        """
        self.model.eval()  # turn on evaluation mode
        eval_loss = 0.
        eval_step = len(eval_dataloader)
        with torch.no_grad():
            for batch_index, (x, y) in enumerate(eval_dataloader):
                x_shape = x.shape  # batch_size, seq_len, codes_num, feature_name
                y_shape = y.shape
                evalx = x.view(-1, x_shape[1], x_shape[-1]).to(self.device).float()
                evaly = y.view(-1, y_shape[1], y_shape[-1]).to(self.device).float()
                output = self.model(evalx)
                eval_loss += criterion(output, evaly).item()
        return eval_loss / eval_step

    def predict_future(self, model, test_dataloader, root, steps='finally'):
        """Plot predictions against ground truth for ``test_dataloader``.

        Collects index ``-(horizon+1)`` along dim 1 of each window -- presumably
        the last observed / first forecast step; confirm against MyDataset_twoset.
        Saves the figure to ``{root}/transformer-epoch{steps}.png`` and returns
        ``(pred_ary, true_ary)``.
        """
        model.eval()  # turn on evaluation mode
        true_list = []
        pred_list = []
        with torch.no_grad():
            for batch_index, (x, y) in enumerate(test_dataloader):
                x_shape = x.shape  # batch_size, seq_len, codes_num, feature_name
                y_shape = y.shape
                testx = x.view(-1, x_shape[1], x_shape[-1]).to(self.device).float()
                testy = y.view(-1, y_shape[1], y_shape[-1]).to(self.device).float()
                output = model(testx)
                true = testy.view(y_shape)
                pred = output.view(y_shape)
                true_list.append((true.view(y.shape).detach().cpu())[:, -self.args.horizon - 1])
                pred_list.append((pred.view(y.shape).detach().cpu())[:, -self.args.horizon - 1])
        true_ary = torch.cat(true_list, dim=0).numpy()
        pred_ary = torch.cat(pred_list, dim=0).numpy()
        result_root = f'{root}/transformer-epoch{steps}.png'
        plot_line(pred_ary[:, 0], true_ary[:, 0], result_root)
        return pred_ary, true_ary
          



# Transfer-learning paradigm (kept for reference): build per-module parameter
# groups so the classifier head trains with a larger learning rate than the backbone.
        # params_group = [] # classifier
        # for k, v in model.named_parameters():
        #     # k (str) is the module name; v (Parameter) is the corresponding tensor
        #     if k[:3] == 'fc2':
        #         params_group.append({'params':v, 'lr': 0.01 })
        #     else:
        #         params_group.append({'params':v, 'lr': 0.001 })
                # v.requires_grad = False  # optionally freeze the backbone layers