# -*- coding: utf-8 -*-
# The extracted data window should span across the fault point.
# All fault points occur at the exact middle of the recording.
from typing import Generator
import scipy.io as scio
import numpy as np
import pandas as pd 
import os
import fnmatch
import pymongo


def readFile(path: str, col: pymongo.collection.Collection,
             oppsite=False):
    """Read one oscilloscope CSV file, resample a window around the
    fault point and store it as one document in the collection *col*.

    Parameters
    ----------
    path : str
        Location of the CSV file.  The file name encodes the fault
        label in its leading one or two digit characters.
    col : pymongo.collection.Collection
        Target collection; one document per file, keyed by "location".
    oppsite : bool, optional
        (name kept misspelled for backward compatibility)  When True,
        the current signals are negated before sampling.

    Raises
    ------
    ValueError
        If the file ends before a "SampleRate" header line is found.
    """
    # Length (in seconds) of one sampling period/window.
    period = 0.02
    # The original sample rate is announced in the CSV header,
    # e.g. a line like "SampleRate,250000".
    with open(path, mode="r") as f:
        while True:
            line = f.readline()
            if not line:
                # EOF reached before the header line — the previous
                # implementation would spin forever here.
                raise ValueError("no SampleRate header in %s" % path)
            if "SampleRate" in line:
                break
    # Read the sampling frequency from the header line.
    source_freq = float(line.split(",")[1].strip())
    # Number of raw samples in one `period` window.
    period_points = int(source_freq * period)

    filename = path.split("/")[-1]
    # Names starting with "." are hidden/temporary data: skip them.
    if filename.startswith("."):
        return
    # The fault label is encoded in the leading digit(s) of the file
    # name; a non-digit or "0" prefix means "no fault".
    labels = []
    if filename[0] == "0" or not filename[0].isdigit():
        labels.append("0")
    else:
        labels.append(filename[0])
        # A second leading digit is a second label channel.
        if filename[1].isdigit():
            labels.append(filename[1])

    sour_data = np.loadtxt(path, skiprows=16, delimiter=",", usecols=range(1, 4))
    # Every recording is expected to hold 125000 samples of 3 currents.
    assert sour_data.shape == (125000, 3)
    # Take the opposite (negated) signal when requested.
    if oppsite:
        sour_data = -sour_data
    # The fault occurs in the middle of the recording: keep two periods
    # before the midpoint and one period after (endpoint included).
    delimiter = sour_data.shape[0] // 2
    start = delimiter - 2 * period_points
    stop = delimiter + period_points + 1
    if start < 0 or stop > sour_data.shape[0]:
        # Window does not fit into the recording.  NOTE(review): the
        # original try/except around the slice never fired — NumPy
        # slicing does not raise on out-of-range bounds, it silently
        # wraps/clips — so the check must be explicit.
        print(period_points)
        return
    data_slice = sour_data[start:stop, :]

    # Downsample to 200 points per period: 3 periods + shared endpoint.
    sample_points_amount = 3 * 200 + 1
    idx = np.linspace(0,
                      data_slice.shape[0] - 1,
                      sample_points_amount,
                      endpoint=True).astype(int)
    data_dict = {}
    for i, name in enumerate(("ia", "ib", "ic")):
        data_dict[name] = data_slice[:, i][idx].tolist()
    # Per-sample label vector: zero before the fault point, the label
    # value for the final period (201 points including the endpoint).
    for i, label in enumerate(labels):
        labels[i] = np.append(np.zeros(2 * 200),
                              np.ones(201) * int(label)).tolist()
    data_dict["label"] = labels
    data_dict["type"] = "experiment"
    data_dict["location"] = path
    try:
        # Insert only when this file has not been stored yet.  The
        # original condition (`is not None`) was inverted: it inserted
        # duplicates and never stored a new file.
        if col.find_one({"location": path}) is None:
            col.insert_one(data_dict)
    except Exception:
        # Best-effort ingestion: report the failing file and move on.
        print(path)


# def retrieve_eles(path, path_list):
#     ele_list = os.listdir(path)
#     for ele in ele_list:
#         ele_path = os.path.join(path, ele)
#         if os.path.isfile(ele_path):
#             path_list.append(ele_path)
#         else:
#             return(ele_path, path_list)

def retrieve_files(path: str) -> Generator:
    """Yield the full path of every file found under *path*, walking
    the directory tree recursively."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            yield os.path.join(dirpath, filename)
        

if __name__ == "__main__":
    # Resolve <repo>/data relative to this script (kept for reference;
    # the actual input paths below are absolute cluster paths).
    scriptDir = os.path.dirname(os.path.realpath(__file__))
    basepath = os.path.dirname(os.path.dirname(scriptDir))
    basepath = os.path.dirname(basepath)
    dataDir = os.path.join(basepath, "data")
    dataDir = os.path.join(dataDir, "邓茜实验数据")

    client = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
    db = client["Power_Fault"]
    col_sour = db["data_sour"]

    # (directory, sign-flip flag) pairs.  Only the rectifier data set
    # needs its currents negated (oppsite=True); the previous version
    # copy-pasted this ingestion loop three times.
    sources = [
        ("/gpfs/scratch/chgwang/XI/data/论文展示的数据/1.整流部分---实验二", True),
        ("/gpfs/scratch/chgwang/XI/data/论文展示的数据/2.逆变部分---第三次实验", False),
        ("/gpfs/scratch/chgwang/XI/data/论文展示的数据/3.特殊情况", False),
    ]
    for directory, flip in sources:
        for file in retrieve_files(directory):
            # Only CSV recordings are ingested.
            if not fnmatch.fnmatch(file, "*.csv"):
                continue
            try:
                readFile(file, col_sour, oppsite=flip)
            except Exception:
                # Best-effort: report the failing file, keep going.
                print(file)
    # Release the MongoDB connection (previously never closed).
    client.close()
