# -*- coding: utf-8 -*-
import numpy as np
import pymongo
import sys
from torch.nn.utils.rnn import pad_sequence
import torch
import math
import os
import typing
import pandas as pd
import multiprocessing as mp

def convert_one_hot(label_list: np.ndarray):
    """Convert fault label(s) into a 6-element multi-hot tensor.

    Labels are 1-based (1..6); a raw label of 0 means "no damage" and
    contributes nothing, so an all-zeros vector is returned for it.

    Args:
        label_list: a 0-d numpy scalar (single label) or a 1-D array
            of labels.

    Returns:
        torch.Tensor of shape (6,), dtype torch.float32.
    """
    label_one_hot = torch.zeros(6)
    if len(label_list.shape) == 0:
        # 0-d array: one scalar label.
        # np.int was removed in NumPy 1.24 — use the builtin int().
        label = int(label_list) - 1
        # label == -1 (raw 0) means no damage at all.
        if label != -1:
            label_one_hot[label] = 1
        return label_one_hot
    for label in label_list:
        # labels are 1-based in the raw data (encoding quirk)
        label = int(label) - 1
        # label == -1 means no damage; skip it.
        if label == -1:
            continue
        label_one_hot[label] = 1
    # dtype is torch.float32 (torch.zeros default)
    return label_one_hot

def normalize(data: np.ndarray):
    """Min-max scale *data* into [0, 1].

    Args:
        data: array-like of numbers (any shape); converted via np.array.

    Returns:
        np.ndarray of the same shape. If all values are equal the range
        is zero, so an all-ones array is returned instead of dividing
        by zero.
    """
    data = np.array(data)
    max_value = np.max(data)
    min_value = np.min(data)
    if max_value == min_value:
        # degenerate (constant) input: avoid division by zero
        data = np.ones_like(data)
    else:
        data = (data - min_value) / (max_value - min_value)
    return data

def getId(id, offset):
    """Load one sample (waveform window + multi-hot label) from MongoDB.

    Args:
        id: the document ``_id`` in the ``data_sour`` collection.
        offset: start index of the 200-point window inside each channel.

    Returns:
        (id_data, id_label): id_data is a torch.float32 tensor of shape
        (3, 200) — channels ia/ib/ic, jointly min-max normalized;
        id_label is the (6,) multi-hot tensor for the window's last point.

    NOTE(review): assumes the document's "ia"/"ib"/"ic" lists reach at
    least offset + 200 points, and that "label" is 2-D with time on
    axis 1 — confirm against the writer of data_sour.
    """
    period_points = 200
    # `with` closes the client on exit — the original leaked a new
    # connection on every call. find_one materializes the document,
    # so it is safe to use after the client is closed.
    with pymongo.MongoClient("mongodb://127.0.0.1:27017/") as client:
        db = client["Power_Fault"]
        col_sour = db["data_sour"]
        id_dict = col_sour.find_one({"_id": id})
    nodes = ["ia", "ib", "ic"]
    # collect the three channel windows, then stack once instead of
    # growing the array with repeated np.append
    columns = []
    for node in nodes:
        data = np.array(id_dict[node][offset:offset + period_points])
        columns.append(data)
    id_data = np.stack(columns, axis=1)          # (period_points, 3)
    id_data = normalize(id_data)                 # joint min-max over all channels
    id_data = np.transpose(id_data)              # (3, period_points)
    id_data = torch.tensor(id_data, dtype=torch.float32)
    # label of the last time step of the window
    id_label = np.array(id_dict["label"]).astype(int)
    id_label = np.squeeze(id_label[:, offset + period_points - 1])
    id_label = convert_one_hot(id_label)
    return (id_data, id_label)

def getPerIds(queue, ids, offset):
    """Worker routine: load every sample in *ids* via getId and push the
    pair (list of data tensors, list of label tensors) onto *queue*."""
    seqs, labels = [], []
    for one_id in ids:
        seq, lab = getId(one_id, offset)
        seqs.append(seq)
        labels.append(lab)
    queue.put((seqs, labels))

def getBatchData(map_ids: list, num_worker: int, offset: int):
    """Load a batch of samples in parallel with *num_worker* processes.

    The ids are split as evenly as possible: the first
    ``len(map_ids) % num_worker`` workers get one extra id.

    Args:
        map_ids: document ids for this batch.
        num_worker: number of worker processes to spawn.
        offset: window offset forwarded to getId.

    Returns:
        (batch_data, batch_labels): stacked torch tensors of shapes
        (len(map_ids), 3, 200) and (len(map_ids), 6).
    """
    per_id_base = len(map_ids) // num_worker
    per_id_remainder = len(map_ids) % num_worker
    prepared_batch_queue = mp.Manager().Queue(num_worker)
    pros = []
    batch_data = []
    batch_labels = []
    # BUG FIX: the original sliced map_ids[i*per_id:(i+1)*per_id], which
    # overlaps/skips ids once per_id differs between workers (e.g. 10 ids
    # over 4 workers duplicated ids 4-5 and dropped ids 8-9). Track the
    # running start index instead.
    start = 0
    for i in range(num_worker):
        per_id = per_id_base + 1 if i < per_id_remainder else per_id_base
        per_pro_ids = map_ids[start:start + per_id]
        start += per_id
        pro = mp.Process(target=getPerIds,
                         args=(prepared_batch_queue, per_pro_ids, offset))
        pro.start()
        pros.append(pro)
    for _ in range(num_worker):
        # blocking get() — exactly one item per worker; polling empty()
        # is racy with multiprocessing queues
        id_data_list, id_labels = prepared_batch_queue.get()
        batch_data.extend(id_data_list)
        batch_labels.extend(id_labels)
    for pro in pros:
        pro.join()
    batch_data = torch.stack(batch_data)
    batch_labels = torch.stack(batch_labels)
    return (batch_data, batch_labels)


def gen_seq(ids: list, batch_size: int, num_worker: int,
            offset=0, return_ids=False):
    """Generator over the dataset in batches of *batch_size*.

    Yields (data, labels) per batch, or (batch ids, data, labels) when
    *return_ids* is true. The last batch may be shorter.
    """
    for start in range(0, len(ids), batch_size):
        map_ids = ids[start:start + batch_size]
        out_seq, label = getBatchData(map_ids, num_worker, offset)
        if return_ids:
            yield (map_ids, out_seq, label)
        else:
            yield (out_seq, label)

if __name__ == "__main__":
    client = pymongo.MongoClient("mongodb://127.0.0.1:27017/")
    collection = client["Power_Fault"]["data_sour"]
    # All document ids — the dataset is split by id.
    all_ids = collection.distinct("_id")


    # Smoke-test a single sample load.
    first_id = all_ids[0]
    data, label = getId(first_id, offset=0)
    print(data.size(), label.size())

    # Smoke-test a parallel batch load.
    mapids = all_ids[:10]
    batch_data, batch_label = getBatchData(mapids, 4, offset=0)
    print(batch_data.size(), batch_label.size())

    # Smoke-test the batch generator on a 100-id subset.
    subset = all_ids[:100]
    train_iter = gen_seq(subset, 10, num_worker=2)
    for seq, lab in train_iter:
        print(seq.size())