# -*- coding: utf-8 -*-
import numpy as np 
import pandas as pd
import matplotlib 
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
from typing import Generator
import torch
import fnmatch
import sys
sys.path.append("/gpfs/scratch/chgwang/XI/Scripts/Refactoring_1/MLModel")
import ParallelNet_1d # type: ignore


# Plot only the channels that are needed.
def readFile(path: str, model: torch.nn.Module,
            oppsite: bool = False):
    """Run *model* over one CSV capture and save a diagnostic figure.

    The CSV header is scanned for the original sample rate, the three
    current channels are down-sampled to 10 kHz, fed through *model*
    with a 200-point sliding window, and the predicted labels are
    plotted against the ground-truth labels encoded in the file name.
    The figure is written next to the CSV with a ``.png`` suffix.

    Args:
        path: CSV file whose name encodes the fault label(s) in its
            first one or two characters; a leading "0" or a non-digit
            means "no fault".
        model: trained classifier; called with a (1, 1, 200, 3) float
            tensor per window.
        oppsite: negate the raw signal before processing (original
            spelling kept for backward compatibility with callers).

    Returns:
        None.  Hidden files (name starting with ".") are skipped.

    Raises:
        ValueError: if the header has no "SampleRate" line or the data
            does not have the expected 125000 samples.
    """
    # Read the original sample rate from the CSV header.
    with open(path, mode="r") as f:
        while True:
            line = f.readline()
            if not line:
                # EOF before the header line: the original code would
                # spin here forever, so fail loudly instead.
                raise ValueError(f"no SampleRate line in header of {path}")
            if "SampleRate" in line:
                break
    line = line.split(",")
    source_freq = float(line[1].strip())
    resample_freq = 1e4
    # Decimation factor to bring the capture down to 10 kHz.
    freq_times = int(source_freq / resample_freq)
    # Ground-truth labels are encoded in the leading file-name characters.
    path_splited = path.split("/")
    # A name starting with "." is hidden data: skip it.
    if path_splited[-1][0] == ".":
        return
    labels = []
    if path_splited[-1][0] == "0":
        labels.append("0")
    elif not path_splited[-1][0].isdigit():
        labels.append("0")
    else:
        labels.append(path_splited[-1][0])
        # A second leading digit encodes a second, simultaneous label.
        if path_splited[-1][1].isdigit():
            labels.append(path_splited[-1][1])
    sour_data = np.loadtxt(path, skiprows=16, delimiter=",", usecols=range(1, 4))
    # Every capture is expected to hold 125000 samples x 3 channels.
    # (Was an `assert`, which -O strips; raise explicitly instead.)
    if sour_data.shape[0] != 125000:
        raise ValueError(
            f"expected 125000 samples, got {sour_data.shape[0]} in {path}")
    if oppsite:
        # Some captures were recorded with inverted polarity.
        sour_data = -sour_data
    modeled_data = sour_data[::freq_times, :3]
    modeled_data = np.transpose(modeled_data)
    modeled_data = torch.tensor(modeled_data)
    # Index at which the fault is assumed to start (mid-capture).
    delimiter = int(modeled_data.shape[1] / 2)
    modeled_label_list = []
    model.eval()

    # Slide a 200-point window over the signal; one prediction per step.
    for i in range(modeled_data.shape[1] - 199):
        window = modeled_data[:, i:i + 200]
        window = window.permute(1, 0)
        window = torch.unsqueeze(window, 0)
        window = torch.unsqueeze(window, 0)
        with torch.no_grad():
            pred = model(window.float())
        modeled_label_list.append(np.squeeze(pred.cpu().numpy()))
    modeled_labels = np.stack(modeled_label_list)
    modeled_labels = np.transpose(modeled_labels)
    labels = np.unique(labels)
    labels_list = []
    for label in labels:
        # BUG FIX: `labels` holds strings, so the original comparison
        # `label != 0` was always True and "no fault" files got a
        # ground-truth curve of ones.  Compare against the string "0".
        if label != "0":
            first_part = np.zeros(delimiter - 200)
            second_part = np.ones(modeled_labels.shape[1] - (delimiter - 200))
            labels_list.append(np.append(first_part, second_part))
        else:
            labels_list.append(np.zeros(modeled_labels.shape[1]))
    # Visualize predictions against the ground truth.
    modeled_data = modeled_data.numpy()
    markers = ["b", "r:", "y--"]
    fig_path = path.replace(".csv", ".png")
    fig, axes = plt.subplots(nrows=len(labels) + 1, ncols=1,
                sharex=True)
    axes[0].plot(modeled_data[0, :], markers[0], label="ia")
    axes[0].plot(modeled_data[1, :], markers[1], label="ib")
    axes[0].plot(modeled_data[2, :], markers[2], label="ic")
    # The first prediction corresponds to sample index 199.
    x_labeled = np.arange(199, modeled_data.shape[1])
    for index, label in enumerate(labels):
        label = int(label)
        axes[index + 1].plot(x_labeled,
                        labels_list[index],
                        markers[0], label="experiment")
        if label != 0:
            axes[index + 1].plot(x_labeled,
                            modeled_labels[label - 1, :],
                            markers[1], label="model")
        else:
            axes[index + 1].plot(x_labeled,
                            np.zeros_like(labels_list[index]),
                            markers[1], label="model")
    fig.savefig(fig_path)
    # Close this specific figure so repeated calls don't leak figures.
    plt.close(fig)
        

def retrieve_files(path: str) -> Generator:
    """Yield the full path of every file found under *path*, recursively."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            yield os.path.join(dirpath, filename)


if __name__ == "__main__":
    # Data recorded with inverted probe polarity ("oppsite").
    path_0 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/1.整流部分---实验二"
    # Data recorded with normal polarity.
    path_1 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/2.逆变部分---第三次实验"
    path_2 = "/gpfs/scratch/chgwang/XI/data/论文展示的数据/3.特殊情况"
    model_path = "/gpfs/scratch/chgwang/XI/DataBase/Model_2d_display/PN-1-0.069-0.124.pt"
    model = ParallelNet_1d.ParallelNet_1d(output_size=6)
    trained_dict = torch.load(model_path, map_location="cpu")
    # Checkpoints from the training loop wrap the weights in a dict.
    if "model_state_dict" in trained_dict:
        trained_dict = trained_dict["model_state_dict"]
    model.load_state_dict(trained_dict, strict=True)

    def _process_tree(root: str, *, oppsite: bool = False,
                      reraise: bool = False) -> None:
        """Run readFile on every CSV under *root*, logging failing files.

        The original code repeated this loop three times with bare
        `except:` clauses, which also swallowed KeyboardInterrupt /
        SystemExit; `except Exception` lets those propagate.
        """
        for file in retrieve_files(root):
            if not fnmatch.fnmatch(file, "*.csv"):
                continue
            try:
                readFile(file, model, oppsite=oppsite)
            except Exception:
                print(file)
                if reraise:
                    raise

    # Smoke-test one known file before walking the directory trees.
    readFile("/gpfs/scratch/chgwang/XI/data/论文展示的数据/1.整流部分---实验二/12.csv", model)

    _process_tree(path_0, oppsite=True)
    _process_tree(path_1)
    # Special cases: re-raise so unexpected failures stop the run.
    _process_tree(path_2, reraise=True)