# -*- coding: utf-8 -*-
import numpy as np 
import pandas as pd
from tqdm import tqdm  
from torch import nn 
# import matplotlib 
# matplotlib.use("Agg")
# import matplotlib.pyplot as plt
import os
from typing import Generator
import torch
import fnmatch
import sys
sys.path.append("/gpfs/scratch/chgwang/XI/Scripts/Refactoring_1/MLModel")
import lstm_net # type: ignore

def readFile(path:str,
            oppsite=False):
    """Read one sampled CSV file and build (data, label) arrays.

    Parameters
    ----------
    path : str
        Path to a CSV file whose header contains a ``SampleRate`` line
        and whose data section starts at row 17 (``skiprows=16``).
    oppsite : bool, optional
        When True, negate the raw signal (the sign convention differs
        between acquisition setups; the misspelled name is kept for
        backward compatibility with existing callers).

    Returns
    -------
    tuple | None
        ``(modeled_data, modeled_label_arr)`` with shapes
        ``(seq_len, 3)`` and ``(seq_len, 6)``; ``None`` for hidden
        files (name starting with a dot).

    Raises
    ------
    ValueError
        If no ``SampleRate`` header line exists, the source rate is
        below the 1e4 Hz resample rate, or the data section does not
        have exactly 125000 rows.
    """
    with open(path, mode="r") as f:
        while True:
            line = f.readline()
            if not line:
                # readline() returns "" at EOF; without this guard the
                # loop would spin forever on a malformed file.
                raise ValueError(f"no 'SampleRate' header line in {path}")
            if "SampleRate" in line:
                break
    line = line.split(",")
    # read the source acquisition frequency, e.g. "SampleRate, 250000"
    source_freq = float(line[1].strip())
    # our target sample rate
    resample_freq = 1e4
    freq_times = int(source_freq / resample_freq)
    if freq_times < 1:
        # a zero step would make the decimation slice below invalid
        raise ValueError(
            f"source rate {source_freq} below resample rate {resample_freq}")
    # derive the labels from the file name
    path_splited = path.split("/")
    # a name starting with "." is a hidden file -> skip it
    if path_splited[-1][0] == ".":
        return None
    labels = []
    if path_splited[-1][0] == "0":
        labels.append("0")
    elif not path_splited[-1][0].isdigit():
        # non-numeric prefix is treated as the "no fault" class 0
        labels.append("0")
    else:
        labels.append(path_splited[-1][0])
        # a second digit in the name encodes an additional class
        if path_splited[-1][1].isdigit():
            labels.append(path_splited[-1][1])
    # convert label characters to integers
    labels = np.array(labels, dtype=np.int16)
    sour_data = np.loadtxt(path, skiprows=16, delimiter=",", usecols=range(1, 4))
    # explicit check instead of assert (asserts vanish under python -O)
    if sour_data.shape[0] != 125000:
        raise ValueError(
            f"expected 125000 data rows, got {sour_data.shape[0]} in {path}")
    # take the opposite sign when requested
    if oppsite:
        sour_data = -sour_data
    # decimate to the resample rate; keep the 3 signal channels
    modeled_data = sour_data[::freq_times, :3]
    # labels are active only in the second half of the sequence
    delimiter = int(modeled_data.shape[0] / 2)
    modeled_label_arr = np.zeros((modeled_data.shape[0], 6))
    for label in labels:
        if label != 0:
            # class k (1-based in the file name) maps to column k-1;
            # broadcasting fills the whole second half at once
            modeled_label_arr[delimiter:, label - 1] = 1
    # modeled_data shape: (seq_len, 3); modeled_label_arr: (seq_len, 6)
    return (modeled_data, modeled_label_arr)

def retrieve_files(path:str) -> Generator:
    """Yield the full path of every file under ``path``, recursively."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for fname in filenames:
            yield os.path.join(dirpath, fname)

def data_iter(path_list:list)->Generator:
    """Yield (input, label) batches for every CSV file under the roots.

    Parameters
    ----------
    path_list : list
        Directory roots to scan.  Files under ``path_list[0]`` are read
        with ``oppsite=True`` (negated signal); the other roots use the
        default sign.

    Yields
    ------
    tuple
        ``(inp_seq, label)`` numpy arrays of shape (1, seq_len, 3) and
        (1, seq_len, 6).
    """
    for path_num, path in enumerate(path_list):
        for file in retrieve_files(path):
            if not fnmatch.fnmatch(file, "*.csv"):
                continue
            try:
                # only the first root uses the opposite sign convention
                result = readFile(file, oppsite=(path_num == 0))
            except Exception:
                # report the unreadable file and skip it; the original
                # bare except fell through and yielded the PREVIOUS
                # iteration's arrays (or raised NameError on the first)
                print(file)
                continue
            if result is None:
                # readFile returns None for hidden files
                continue
            inp_seq, label = result
            # add a leading batch dimension of 1
            inp_seq = np.expand_dims(inp_seq, axis=0)
            label = np.expand_dims(label, axis=0)
            yield (inp_seq, label)
# evaluation model
def eval_model(model, data_iter, loss, device):
    """Compute the mean per-batch loss of ``model`` over ``data_iter``.

    Parameters
    ----------
    model : nn.Module-like
        Model under evaluation; its forward may return either a tensor
        or an ``(output, state)`` tuple (as unpacked in train_model).
    data_iter : iterable
        Yields ``(input, label)`` tensor pairs.
    loss : callable
        Maps (prediction, label) to a scalar tensor.
    device : torch.device
        Device the batches are moved to.

    Returns
    -------
    float
        Mean loss over all batches; NaN when the iterator is empty.
    """
    model.eval()
    loss_list = []
    for out_seq, label in data_iter:
        out_seq = out_seq.to(device)
        label = label.to(device)
        with torch.no_grad():
            modeled_label = model(out_seq)
            # keep only the output when the model also returns state,
            # for consistency with train_model's forward-pass unpacking
            if isinstance(modeled_label, tuple):
                modeled_label = modeled_label[0]
            l = loss(modeled_label, label)
            loss_list.append(l.cpu().item())
    if not loss_list:
        # np.mean([]) warns and returns nan; make the empty case explicit
        return float("nan")
    return float(np.mean(loss_list))

# model weight initialization
def xavier_init_weights(m):
    """Xavier-initialize the weights of supported layer types.

    Intended for ``model.apply(xavier_init_weights)``.  Linear and
    Conv2d weight matrices, and every ``weight*`` parameter of GRU and
    LSTM layers, are re-initialized with ``xavier_uniform_``; biases
    and other layer types are left untouched.
    """
    # isinstance (rather than type() ==) also covers subclasses
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)
    elif isinstance(m, (nn.GRU, nn.LSTM)):
        # public named_parameters() instead of the private
        # _flat_weights_names / _parameters internals
        for name, param in m.named_parameters():
            if "weight" in name:
                nn.init.xavier_uniform_(param)

def train_model(model, lr, l2_penalty, num_epochs, device,
                path_list, train_times):
    """Train ``model`` with BCEWithLogitsLoss, checkpointing every epoch.

    Parameters
    ----------
    model : nn.Module
        Model whose forward returns ``(output, state)``.
    lr : float
        Initial RMSprop learning rate (decayed by 0.9 per epoch).
    l2_penalty : float
        Weight decay passed to the optimizer.
    num_epochs : int
        Number of passes over the data.
    device : torch.device
        Device for inputs and targets.
    path_list : list
        Directory roots handed to ``data_iter``.
    train_times : int
        Expected batches per epoch (progress-bar total only).
    """
    loss = nn.BCEWithLogitsLoss()
    # exponential LR decay: lr * 0.9**epoch
    lambdaLR = lambda epoch: 0.90 ** (epoch)
    optimizer = torch.optim.RMSprop(model.parameters(), lr=lr,
                    weight_decay=l2_penalty)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                    lr_lambda=lambdaLR)

    # checkpoint location is loop-invariant, so compute it once:
    # <base>/DataBase/Model_lstm, three directory levels above this script
    scriptPath = os.path.realpath(__file__)
    basedir = os.path.dirname(os.path.dirname(scriptPath))
    basedir = os.path.dirname(basedir)
    basedir = os.path.dirname(basedir)
    datadir = os.path.join(basedir, "DataBase")
    modeldir = os.path.join(datadir, "Model_lstm")
    # torch.save fails if the directory is missing
    os.makedirs(modeldir, exist_ok=True)

    for epoch in range(num_epochs):
        print(f"Epoch: {epoch+1}")
        train_pbar = tqdm(total=train_times)
        model.train()
        loss_list = []
        train_iter = data_iter(path_list)
        for inp_seq, target_label in train_iter:
            train_pbar.update(1)
            inp_seq = torch.from_numpy(inp_seq).float().to(device)
            target_label = torch.from_numpy(target_label).float().to(device)
            optimizer.zero_grad()
            # the LSTM state is not needed for the loss
            modeled_label, _ = model(inp_seq)
            l = loss(modeled_label, target_label)
            l.backward()
            loss_list.append(l.cpu().item())
            optimizer.step()
        train_pbar.close()

        tra_loss = np.mean(loss_list)
        scheduler.step()
        model_path = os.path.join(modeldir, "PN-%d-%.3f.pt"%(epoch, tra_loss))
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': loss,}, model_path)
        print(f"Epoch:{epoch+1} training loss: {tra_loss:.3f}")

        
if __name__ == "__main__":
    # data roots: files under path_0 need the signal negated (oppsite
    # sign convention); the other two roots use the raw sign
    path_0 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/1.整流部分---实验二"
    path_1 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/2.逆变部分---第三次实验"
    path_2 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/3.特殊情况"

    # model setting
    input_dim = 3        # signal channels per time step
    hidden_size = 1024
    num_layers = 4
    out_dim = 6          # number of output classes
    model = lstm_net.lstm_net(input_size=input_dim,
                              hidden_size=hidden_size,
                              num_layers=num_layers,
                              out_dim=out_dim)

    # training setting
    lr = 1e-2
    l2_penalty = 0
    num_epochs = 20
    device = torch.device("cpu")
    path_list = [path_0, path_1, path_2]
    # 28 = expected number of CSV batches per epoch (progress-bar total)
    train_model(model, lr, l2_penalty, num_epochs, device, path_list, 28)

