import gzip
import multiprocessing as mp
import os
import pickle
import time
import xml.etree.ElementTree as ET
from itertools import chain
from pathlib import Path
from queue import Empty
from turtle import st  # FIXME(review): almost certainly an accidental IDE auto-import; `st` is always rebound locally before use — verify nothing relies on it, then remove.

import numpy as np
import pandas as pd

from utils import *
# Kept after `utils` so these explicit names win over any same-named ones from
# the star import, matching the original import order.
from feasytools import ReadOnlyTable, ArgChecker, time2str

# Use multiprocessing to speed up data preprocessing.
# For datasets without V2G: inputs are trip info and charging prices; the output is the charging load.
# For datasets with V2G: inputs are trip info, charging prices and discharging prices; outputs are the
# charging load, the V2G discharging load and the maximum possible V2G discharging power.

def get_trips(roots:Union[str,ET.Element])->'tuple[list[str],np.ndarray]':
    '''获取行程信息'''
    if isinstance(roots,str):
        root=ET.ElementTree(file=roots+"/veh.xml").getroot()
    else:
        root=roots
    names_set:set[str]=set()
    names_map:dict[str,int]={}
    for veh in root:
        for trip in veh:
            st,ed=trip.attrib["route_edges"].split(" ")
            names_set.add(st);names_set.add(ed)
    names:list[str]=list(names_set)
    names.sort()
    heads=["time"]
    for i,e in enumerate(names,start=1):
        heads.append(e+"#st")
        heads.append(e+"#ed")
        names_map[e+"#st"]=i*2-1
        names_map[e+"#ed"]=i*2
    TLen=7*24*60
    data=np.zeros((TLen,1+len(names)*2),np.int32)
    data[:,0]=np.linspace(0,TLen,num=TLen,endpoint=False)
    for veh in root:
        for trip in veh:
            tm=int(trip.attrib["depart"])
            if tm<86400 or tm>=8*86400: continue
            tm=(tm-86400)//60
            st,ed=trip.attrib["route_edges"].split(" ")
            data[tm,names_map[st+"#st"]]+=1
            data[tm,names_map[ed+"#ed"]]+=1
    return heads, data

def __proc_price(node:ET.Element)->np.ndarray:
    '''Expand a price node into a per-minute price series for one week.

    Each ``<item btime="..." price="..."/>`` child starts a piecewise-constant
    segment at second ``btime`` that lasts until the next item's ``btime``.
    Times are absolute seconds; the week window is [86400, 86400 + 7*24*3600).

    Returns:
        A float32 array of length 7*24*60 with the price for every minute
        of the week (0.0 where no segment applies).
    '''
    TLen = 7 * 24 * 60  # one week, in minutes
    segments = [(int(itm.attrib["btime"]), float(itm.attrib["price"])) for itm in node]
    # Sentinel end-time so the final real segment extends to the end of the week.
    segments.append((TLen * 60 + 86400, 0.0))
    data = np.zeros(TLen, np.float32)
    for (btime, price), (next_btime, _) in zip(segments, segments[1:]):
        st = (btime - 86400) // 60
        ed = (next_btime - 86400) // 60
        # Clip to the week window. Previously a segment ending past TLen made
        # `data[st:ed] = val` fail with a broadcast error (slice clipped, val not).
        st = max(st, 0)
        ed = min(ed, TLen)
        if st < ed:
            data[st:ed] = price  # scalar fill replaces the old linspace(p, p, ...)
    return data

def __get_price(data_path:str,v2g:bool,ispk:bool)->'tuple[list[str],np.ndarray]':
    '''Read per-station price curves from the XML file at *data_path*.

    Returns the column headers (``<name>#pb`` for buy prices and — only when
    both *v2g* and *ispk* are set — ``<name>#ps`` for sell prices) together
    with a ``(TLen, n_cols)`` float32 matrix of per-minute prices.
    '''
    stations = ET.ElementTree(file=data_path).getroot()
    cols: 'list[str]' = []
    series: 'list[np.ndarray]' = []
    include_sell = v2g and ispk
    for station in stations:
        for child in station:
            if child.tag == "pbuy":
                cols.append(station.attrib["name"] + "#pb")
                series.append(__proc_price(child))
            elif child.tag == "psell" and include_sell:
                cols.append(station.attrib["name"] + "#ps")
                series.append(__proc_price(child))
    return cols, np.stack(series, axis=-1)

def proc_trips_and_prices(data_root:str,output_root:str,v2g:bool):
    '''Merge trip counts with charging-station and parking prices into one CSV.

    Reads ``veh.xml``, ``cs.xml`` and ``pk.xml`` under *data_root* and writes a
    single comma-separated file to *output_root* (header row, then one row per
    minute of the week).
    '''
    trip_heads, trip_rows = get_trips(data_root)
    cs_heads, cs_rows = __get_price(data_root + "/cs.xml", v2g, False)
    pk_heads, pk_rows = __get_price(data_root + "/pk.xml", v2g, True)
    with open(output_root, "wb") as out:
        out.write(','.join(chain(trip_heads, cs_heads, pk_heads)).encode())
        out.write(b"\n")
        for a, b, c in zip(trip_rows, cs_rows, pk_rows):
            cells = chain(map(str, a), map(str, b), map(str, c))
            out.write(','.join(cells).encode())
            out.write(b"\n")

def proc_loads(data_root:str,output_root:str,v2g:bool):
    '''Aggregate the simulator's load table to per-minute means and write a CSV.

    Reads the pickled timeline (seconds) and the ``cspk.sdt`` table under
    *data_root*, drops unwanted columns, averages rows sharing the same minute,
    and writes rows 1440..-1 of the result to *output_root*.
    '''
    with gzip.open(data_root + "/time_line.pkl.gz", 'rb') as fp:
        minutes = np.array(pickle.load(fp)) // 60 - 1440
    table = ReadOnlyTable(data_root + "/cspk.sdt", preload=True)
    frame = pd.DataFrame(data=table._d, columns=table.head)
    frame.insert(0, "time", minutes)
    for col in table.head:
        # Always drop #cnt/#pb/#ps columns; drop #d/#v2g columns when the
        # name starts with "CS" or when V2G is disabled (same predicate as
        # before, regrouped).  NOTE(review): presumably "CS"-prefixed columns
        # are charging stations without V2G — confirm against the producer.
        drop = col.endswith(("#cnt", "#pb", "#ps"))
        if not drop and col.endswith(("#d", "#v2g")):
            drop = col.startswith("CS") or not v2g
        if drop:
            frame.pop(col)
    frame = frame.groupby("time").mean()
    frame[1440:-1].to_csv(output_root)

def preproc_one(i:str,o:str,v2g:bool,mpQ:mp.Queue):
    '''Worker: preprocess one dataset directory *i* into *o*.

    Skips each output file that already exists, then reports completion by
    pushing ``"ok"`` onto *mpQ*.
    '''
    out_dir = Path(o)
    out_dir.mkdir(parents=True, exist_ok=True)
    if not (out_dir / "input.csv").exists():
        proc_trips_and_prices(i, o + "/input.csv", v2g)
    if not (out_dir / "output.csv").exists():
        proc_loads(i, o + "/output.csv", v2g)
    mpQ.put_nowait("ok")

def preproc(input_root:str,output_root:str,v2g:bool):
    '''Preprocess every dataset directory under *input_root* in parallel.

    Fans one `preproc_one` task per entry of *input_root* out to a process
    pool, then polls the completion queue and prints a progress line roughly
    once per second.
    '''
    pool = mp.Pool(mp.cpu_count())
    mpQ = mp.Manager().Queue()
    datasets = os.listdir(input_root)
    failures: 'list[BaseException]' = []

    def _on_error(exc):
        # Runs in the main process.  Counting failures lets the progress loop
        # below terminate; with the old `error_callback=print`, a failing
        # worker never sent "ok" and the loop spun forever.
        print(exc)
        failures.append(exc)

    for d in datasets:
        pool.apply_async(preproc_one, args=(input_root + d, output_root + d, v2g, mpQ),
                         error_callback=_on_error)
    pool.close()
    fin_count = 0
    st_time = time.time()
    last_upd = 0.0
    while fin_count + len(failures) < len(datasets):
        try:
            res = mpQ.get(timeout=1)
        except Empty:
            # Poll timeout only (the old bare `except:` also swallowed
            # KeyboardInterrupt); fall through to refresh the progress line.
            res = None
        if res is not None:
            if res == "ok":
                fin_count += 1
            else:
                raise ValueError(f"Unknown result: {res}")
        this_time = time.time()
        if this_time - last_upd > 1:
            # "Elasped" typo fixed in the progress message.
            print(f"\rProgress: {fin_count}/{len(datasets)} Elapsed: {time2str(this_time-st_time)}  ", end="")
            last_upd = this_time
    pool.join()
    print(f"\rAll done! Duration: {time2str(time.time()-st_time)}           ")

if __name__=="__main__":
    parser = ArgChecker()

    def _with_sep(path: str) -> str:
        # Roots are later concatenated with dataset names, so make sure they
        # end with a path separator.
        return path if path.endswith(("/", "\\")) else path + "/"

    input_root = _with_sep(parser.pop_str('i'))
    output_root = _with_sep(parser.pop_str('o'))
    v2g = parser.pop_bool('v2g')
    print(f"input_root: {input_root}, output_root: {output_root}, v2g: {v2g}")
    preproc(input_root, output_root, v2g)
