#!/usr/bin/env python


import os
import csv
import json
import logging
import time
import datetime


from dlc import DLC
from multithread import multithread
from common import replace_id
from metadata_dt import MetaDataDT040 as mddt120
from metadata_d import MetaDataD040 as asset120
from loadcase import LoadCase
from multiprocessing import Pool

# logging.basicConfig(level=logging.DEBUG,
#                     format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
#                     datefmt='%a, %d %b %Y %H:%M:%S',
#                     filename='taskparser.log',
#                     filemode='w')

class Simulation(object):
    """Parses a simulator run directory (``<sim_path>/Loads/<DLC>/...``) and
    exports a KMX field-group description plus one asset description per
    loadcase as JSON files.

    Output locations are taken from the FIELDGROUP_SAVED_PATH and
    ASSETS_SAVED_PATH environment variables.
    """

    def __init__(self, sim_path):
        self.sim_path = os.path.realpath(sim_path)
        self.sim_name = os.path.basename(self.sim_path)
        # Build from the resolved path (was the raw argument), matching
        # BigNdarray below.
        self.loads_path = os.path.join(self.sim_path, 'Loads')

        # Map of DLC directory basename -> DLC instance.
        self.dlcs = {}
        for ds in self.__dlcs__():
            self.dlcs.update(ds)
        # Field ids registered by create_fieldgroup(), in insertion order.
        self.fields = []

    @property
    def dlc_groups(self):
        """Absolute paths of every DLC sub-directory under Loads/."""
        loads = self.loads_path
        return [os.path.join(loads, p) for p in os.listdir(loads)
                if os.path.isdir(os.path.join(loads, p))]

    def __dlcs__(self):
        """Yield one ``{dlc_name: DLC}`` mapping per DLC directory."""
        for dlc in self.dlc_groups:
            yield {os.path.basename(dlc): DLC(dlc)}

    def files_analysis(self):
        """Print a per-DLC / per-loadcase summary of the describe files."""
        describe_files = {}
        dlc_reports = []
        loadcase_total = 0
        for dlc_name, dlc in self.dlcs.items():
            report = 'DLC name: %s, loadcase group: %d\n' % (dlc_name, len(dlc.loadcases))
            loadcase_total += len(dlc.loadcases)
            lc_lines = []  # was bound to 'str', shadowing the builtin
            for lc_name, lc in dlc.loadcases.items():
                lc_lines.append(
                    '    Loadcase name: %s, describe_file num: %d, describe_vars num: %d\n'
                    % (lc_name, len(lc.file_analysis), len(lc.variables)))
                describe_files.update(lc.file_analysis)
            report += ''.join(sorted(lc_lines))
            dlc_reports.append(report)

        for report in sorted(dlc_reports):
            print(report)
        print('DLC total: %d, loadcase total: %d, describe_files total: %d'
              % (len(self.dlc_groups), loadcase_total, len(describe_files)))

    def varibles_analysis(self):
        """Print the number of distinct describe variables across all loadcases."""
        variables = set()
        for dlc in self.dlcs.values():
            for lc in dlc.loadcases.values():
                variables.update(lc.variables)
        print('describe_variables total: %d' % len(variables))

    @property
    def fieldgroup_id(self):
        """Field-group id derived from the simulation directory name."""
        return replace_id(self.sim_name)

    def create_fieldgroup(self):
        """Merge the variables of every loadcase into one field group and dump
        it as JSON to ``$FIELDGROUP_SAVED_PATH/fieldgroup.json``.

        Raises:
            RuntimeError: if FIELDGROUP_SAVED_PATH is not set.
        """
        fgid = self.fieldgroup_id
        variables = {}

        # Merge duplicate variables across loadcases, accumulating their
        # 'genlab' sources.  A copy is stored so the per-loadcase variable
        # dicts are not mutated as a side effect of the merge.
        for dlc in self.dlcs.values():
            for lc in dlc.loadcases.values():
                for var_name, var in lc.variables.items():
                    if var_name in variables:  # was has_key() (Python-2-only)
                        variables[var_name]['genlab'].extend(var['genlab'])
                    else:
                        merged = dict(var)
                        merged['genlab'] = list(var['genlab'])
                        variables[var_name] = merged

        dt = mddt120(id=fgid, name=self.sim_name)
        dt.add_tag("simulator")
        # Compound-key (id) fields, then the fixed section/station columns.
        for fid in ["fgId", "loadCaseId"]:
            dt.add_idfield(id=fid, name=fid, valueType="STRING", isIdField=True)
            self.fields.append(fid)
        for fid in ["sectionMax", "sectionId"]:
            dt.add_idfield(id=fid, name=fid, valueType="DOUBLE", isIdField=False)
            self.fields.append(fid)
        for name in ["Blade station radius", "Tower station height"]:
            dt.add_idfield(id=replace_id(name), name=name, unit="L",
                           valueType="DOUBLE", isIdField=False)
            # NOTE(review): the display name (not replace_id(name)) is
            # recorded here, while every other branch records the id --
            # preserved as-is, but confirm this asymmetry is intended.
            self.fields.append(name)

        for var_name in sorted(variables):
            var = variables[var_name]
            dt.add_idfield(id=var["id"], name=var_name, unit=var["unit"],
                           valueType="DOUBLE",
                           description=','.join(set(var['genlab'])))
            self.fields.append(var["id"])

        fg_path = os.getenv('FIELDGROUP_SAVED_PATH')
        if not fg_path:
            raise RuntimeError('FIELDGROUP_SAVED_PATH environment variable is not set')
        if not os.path.exists(fg_path):
            os.makedirs(fg_path)
        # 'with' ensures the handle is closed (was an unclosed open() call).
        with open(os.path.join(fg_path, 'fieldgroup.json'), mode='w') as f:
            json.dump(dt.__dict__, f, indent=2)

    def create_assets(self):
        """Dump one asset JSON file per loadcase to ``$ASSETS_SAVED_PATH``.

        Raises:
            RuntimeError: if ASSETS_SAVED_PATH is not set.
        """
        fgid = self.fieldgroup_id
        assets = []
        asset_saved_path = os.getenv('ASSETS_SAVED_PATH')
        if not asset_saved_path:
            raise RuntimeError('ASSETS_SAVED_PATH environment variable is not set')
        if not os.path.exists(asset_saved_path):
            os.makedirs(asset_saved_path)

        for dlc in self.dlcs.values():
            for lc_name, lc in dlc.loadcases.items():
                lc_id = lc.lc_id
                asset = asset120(name=lc_name, fieldGroupId=fgid)
                asset.add_tag('simulator')
                asset.add_compound("fgId", fgid)
                asset.add_compound("loadCaseId", lc_id)

                with open(os.path.join(asset_saved_path, '%s.json' % lc_id), mode='w') as f:
                    json.dump(asset.__dict__, f, indent=2)
                assets.append(asset)
        print('assets total: %d' % len(assets))


class BigNdarray(object):
    """Processing stages 2 and 3: converts the loadcases under
    ``<sim_path>/Loads`` into "big ndarray" CSVs, then rewrites those into
    KMX-format CSVs.

    Directory locations and worker counts come from the BIG_NDARRAY_SAVED_PATH,
    FIELDGROUP_SAVED_PATH and PROCESSER_NUM environment variables.
    """

    def __init__(self, sim_path):
        self.sim_path = os.path.realpath(sim_path)
        self.loads_path = os.path.join(self.sim_path, 'Loads')

    @property
    def dlc_groups(self):
        """Absolute paths of every DLC sub-directory under Loads/."""
        loads = self.loads_path
        return [os.path.join(loads, p) for p in os.listdir(loads)
                if os.path.isdir(os.path.join(loads, p))]

    def bigndarray(self):
        """Convert every loadcase directory to a big-ndarray CSV in parallel."""
        args = []
        for dlc in sorted(self.dlc_groups):
            for p in os.listdir(dlc):
                args.append(os.path.join(dlc, p))

        # Default to 7 workers when PROCESSER_NUM is unset instead of
        # crashing on int(None).
        pool = Pool(int(os.getenv('PROCESSER_NUM', '7')))
        pool.map(__process_bigndarray__, sorted(args))
        pool.close()
        pool.join()

    def kmxcsv(self):
        """Rewrite every big-ndarray CSV under $BIG_NDARRAY_SAVED_PATH as a
        KMX CSV, in parallel via __process_kmxcsv__."""
        rowheader = []
        idfields = []
        mapper = {}

        fg_path = os.getenv('FIELDGROUP_SAVED_PATH')
        with open("%s/fieldgroup.json" % fg_path) as f:
            fg = json.load(f)
        fgid = fg['id']
        for field in fg['fields']:
            if field['isIdField']:
                idfields.append(field['id'])
                continue
            rowheader.append(field['id'])
            mapper[field['id']] = field['name']

        # Prepend the fixed timestamp/compound columns; each insert goes to
        # position 0, so the final order is the reverse of this list.
        for k in ["ts", "loadCaseId", "fgId", "fieldGroupId"]:
            rowheader.insert(0, k)
            mapper[k] = k

        args = []
        bigndarray_lcs = []
        dlcs_path = os.getenv('BIG_NDARRAY_SAVED_PATH')
        for dlc in os.listdir(dlcs_path):
            dlc_path = os.path.join(dlcs_path, dlc)
            if not os.path.isdir(dlc_path):
                continue
            # Loop variable renamed from 'csv', which shadowed the csv module.
            for fname in os.listdir(dlc_path):
                csv_path = os.path.join(dlc_path, fname)
                if os.path.isfile(csv_path):
                    bigndarray_lcs.append(csv_path)
                    args.append((fgid, rowheader, mapper, csv_path))
        print(len(bigndarray_lcs))
        # Same configurable worker count as bigndarray() (was hard-coded 7).
        pool = Pool(int(os.getenv('PROCESSER_NUM', '7')))
        pool.map(__process_kmxcsv__, args)
        pool.close()
        pool.join()

def __process_kmxcsv__(args):
    """Pool worker: rewrite one big-ndarray CSV into KMX CSV format.

    Args:
        args: ``(fgid, rowheader, mapper, lc_path)`` tuple — field-group id,
            output column order, column-id -> display-name map (currently
            unused here), and the input CSV path.

    Writes ``kmx_<name>.csv`` into $KMX_CSV_SAVED_PATH.
    """
    fgid, rowheader, mapper, lc_path = args

    lc_name = os.path.basename(lc_path)
    lc_save_path = os.getenv('KMX_CSV_SAVED_PATH')
    if not os.path.exists(lc_save_path):
        os.makedirs(lc_save_path)
    save_path = os.path.join(lc_save_path, "kmx_%s" % lc_name)

    logging.info("process loadcase %s" % lc_name)

    # Remove the '.csv' suffix.  The original lc_name.strip('.csv') stripped
    # the *character set* {., c, s, v} from both ends, mangling any name that
    # starts or ends with one of those characters.
    lc_base = lc_name[:-len('.csv')] if lc_name.endswith('.csv') else lc_name
    # Constant columns added to every output row.
    row_data = {
        "fieldGroupId": fgid,
        "fgId": fgid,
        "loadCaseId": replace_id(lc_base),
    }

    with open(save_path, mode='w') as out_f:
        writer = csv.DictWriter(out_f, rowheader)
        writer.writeheader()
        with open(lc_path) as in_f:
            for line in csv.DictReader(in_f):
                # Iterate over a snapshot because cells are deleted in-place.
                for k, v in list(line.items()):
                    if v == 'NAN' or v == 'nan':
                        # Drop NAN cells entirely (including a NAN 'ts',
                        # which previously crashed in float()).
                        del line[k]
                    elif k == 'ts':
                        # Epoch seconds -> ISO-8601 with a fixed +08:00 offset.
                        line[k] = datetime.datetime.fromtimestamp(float(v)).strftime(
                            "%Y-%m-%dT%H:%M:%S.%f+08:00")
                # Once per row (was redundantly executed once per cell).
                line.update(row_data)
                writer.writerow(line)
def __process_bigndarray__(lc_path):
    """Pool worker: convert one loadcase directory into a big-ndarray CSV
    saved as ``$BIG_NDARRAY_SAVED_PATH/<dlc dir name>/<loadcase name>.csv``."""
    dlc_name = os.path.basename(os.path.dirname(lc_path))
    lc_name = os.path.basename(lc_path)
    loadcase = LoadCase(lc_path)
    out_dir = os.path.join(os.getenv('BIG_NDARRAY_SAVED_PATH'), dlc_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    loadcase.save_bigndarray(os.path.join(out_dir, "%s.csv" % lc_name))


if __name__ == '__main__':
    import sys

    # Steps 1 (field group / assets via Simulation) and 2 (bigndarray
    # generation) are assumed to have been run already; this entry point
    # only performs step 3, the KMX CSV conversion.
    # NOTE(review): logging.basicConfig is commented out at the top of the
    # file, so these info-level messages are dropped by default -- confirm.
    converter = BigNdarray(sys.argv[1])

    logging.info("start kmx csv")
    converter.kmxcsv()
    logging.info("kmx csv finish.")