import ast
import copy
import datetime
import math

import numpy as np
import pandas as pd

from django.db import connection
from django.db.models import Avg, Count, F, Max, Min, Q
from django.forms.models import model_to_dict
from django.http import FileResponse, HttpResponse, JsonResponse
from django.shortcuts import render

from .models import TestProjectList as TPL, SourceData as SD, FeatureData as FD, CHANNELS, GOOD_CHANNELS, FEATURES, GOOD_FEATURES, ChannelAnalysis
from .tools import overview_data_process, trendilization, smooth_curve, str_to_list

# Create your views here.


def get_test_list(request):
    """Return the list of test sets.

    Params: request
        No query parameters are used.

    Returns: JsonResponse
        A JSON array of {'key': <pro_id>, 'label': <test_no>} dicts, e.g.
        [{'key': 1, 'label': 'SG0011832-32'},
         {'key': 2, 'label': 'SG8738212-22'},
         ...]
    """
    queryset = TPL.objects.annotate(key=F('pro_id'), label=F('test_no'))
    return JsonResponse(list(queryset.values('key', 'label')), safe=False)


def get_overview(request):
    """Build the data for the test-planning overview view.

    Params: request
        starterId: id of the test set (pro_id).

    Returns: JsonResponse
        res: dict (a dict keeps the payload extensible)
        - global: global statistics
          - data: per-day counts of each start type, e.g.
            [{'date': '2017-09-03', '0': 11, '1': 987, '-1': 80, ...}]
          - type_counts: total count of each start type in the set, e.g.
            {'0': 82178, '1': 1263, '2': 873, ...}
    """
    starter_id = int(request.GET['starterId'])

    # Number of tests per start type, reshaped into type_counts.
    class_counts = FD.objects.filter(pro_id=starter_id).values(
        'start_label').annotate(counts=Count('start_label'))
    type_counts = {str(row['start_label']): row['counts'] for row in class_counts}

    # Distinct test days, ordered.  Bail out early when the set is empty —
    # the original code raised IndexError on dates[0] in that case.
    dates = list(FD.objects.filter(pro_id=starter_id).values(
        'day_time').distinct().order_by('day_time'))
    if not dates:
        return JsonResponse(
            {'global': {'data': [], 'type_counts': type_counts}}, safe=False)

    # Date range of the tests; pad to at least 90 days.
    begin_date = dates[0]['day_time']
    end_date = dates[-1]['day_time']
    min_end = begin_date + datetime.timedelta(days=90)
    if end_date < min_end:
        end_date = min_end

    # Template: one row per day, every start type initialised to 0.
    class_list = [c['start_label'] for c in class_counts]
    class_data = []
    day = begin_date
    while day <= end_date:
        row = dict.fromkeys(class_list, 0)
        row['date'] = day
        class_data.append(row)
        day += datetime.timedelta(days=1)

    # Per-type daily counts:
    # {'0': [{'date': xxx, 'count': xxx}, {'date': xxx, 'count': xxx}, ...]}
    dataset = {}
    for entry in class_counts:  # iterate directly, not range(len(...))
        label = entry['start_label']
        start_time = datetime.datetime.now()
        dataset[str(label)] = list(
            FD.objects.filter(pro_id=starter_id, start_label=label)
            .annotate(date=F('day_time')).values('date')
            .annotate(count=Count('date')))
        print("query label %s costs %s" % (label, datetime.datetime.now() - start_time))

    # Reshape into the structure the frontend API expects.
    dataset = overview_data_process(dataset)

    for data in class_data:
        key = str(data['date'])
        if key in dataset:
            data.update(dataset[key])

    res = {'global': {'data': class_data, 'type_counts': type_counts}}

    return JsonResponse(res, safe=False)


def get_default_table(request):
    """Return the default, paginated Test List table for one test set."""
    starter_id = int(request.GET['starterId'])
    offset = int(request.GET['offset'])
    page_size = int(request.GET['size'])

    base_qs = SD.objects.filter(pro_id=starter_id)
    total = base_qs.count()
    page = base_qs.annotate(
        start_time=F('test_time'), abnormal_type=F('start_label')
    ).values('test_id', 'start_time', 'abnormal_type').order_by(
        'test_id')[offset:offset + page_size]

    return JsonResponse({'total': total, 'list': list(page)}, safe=False)


def query_selection(request):
    """Return a page of test rows matching the frontend's selection.

    Params: request
        starterId: test set id
        offset:    pagination offset
        size:      page size
        dates[]:   selected dates
        types[]:   selected start types (normal/abnormal labels)

    Returns: JsonResponse
        {'total': <match count>,
         'list': [{'test_id', 'start_time', 'abnormal_type'}, ...]}
    """
    starter_id = int(request.GET['starterId'])
    start = int(request.GET['offset'])
    size = int(request.GET['size'])
    dates = request.GET.getlist('dates[]')
    types = request.GET.getlist('types[]')

    datalist = []
    total = 0
    sql_query_start = datetime.datetime.now()
    if dates and types:
        query_count_start = datetime.datetime.now()
        total = SD.objects.filter(
            pro_id=starter_id, day_time__in=dates, start_label__in=types).count()
        print("本次COUNT is %s" % (total))
        print("[取count]%s" % str(datetime.datetime.now() - query_count_start))

        # SD (not FD) must be used here: FD lacks hour/minute/second precision.
        query_testids_start = datetime.datetime.now()
        page = SD.objects.filter(
            pro_id=starter_id, day_time__in=dates, start_label__in=types
        ).annotate(
            start_time=F('test_time'), abnormal_type=F('start_label')
        ).values('test_id', 'start_time', 'abnormal_type').order_by(
            'start_time')[start:start + size]
        datalist = list(page)
        print("[取testids]%s" % (str(datetime.datetime.now() - query_testids_start)))
    print("[sql查询总用时]%s" % (str(datetime.datetime.now() - sql_query_start)))

    table = {'total': total, 'list': datalist}

    return JsonResponse(table, safe=False)

def query_selection_counts(request):
    """Return only the count of tests matching the selected dates/types."""
    # Cast to int for consistency with the other views in this module.
    starter_id = int(request.GET['starterId'])
    dates = request.GET.getlist('dates[]')
    types = request.GET.getlist('types[]')
    count = FD.objects.filter(
        pro_id=starter_id, day_time__in=dates, start_label__in=types).count()
    return JsonResponse({"count": count}, safe=False)


def query_selection_trend(request):
    """Return trend-line data for the selected tests.

    Parameters are identical to query_selection:
        starterId: test set id
        dates[]:   selected dates
        types[]:   selected start types

    Returns: JsonResponse
        {<channel>: <trendilized series>, ...} for every GOOD_CHANNELS entry.
    """
    starter_id = request.GET['starterId']
    dates = request.GET.getlist('dates[]')
    types = request.GET.getlist('types[]')

    res = {}
    if dates and types:
        trend_query_begin_timestamp = datetime.datetime.now()
        query = SD.objects.filter(
            pro_id=starter_id, day_time__in=dates, start_label__in=types).values('test_id')
        tests = [t['test_id'] for t in query]
        data = list(SD.objects.filter(pro_id=starter_id,
                                      test_id__in=tests).values(*GOOD_CHANNELS))
        print("[trend-数据库查询用时]%s" %
              str(datetime.datetime.now() - trend_query_begin_timestamp))

        for channel in GOOD_CHANNELS:
            parse_begin_timestamp = datetime.datetime.now()
            # ast.literal_eval parses the stored "[...]" strings into lists
            # without the arbitrary-code-execution risk of eval().
            res[channel] = [ast.literal_eval(v[channel]) if v[channel] else None
                            for v in data]
            print("[eval用时]%s-%s" %
                  (channel, str(datetime.datetime.now() - parse_begin_timestamp)))
            trendilization_begin_timestamp = datetime.datetime.now()
            res[channel] = trendilization(res[channel])
            print("[trendization-算法用时]%s-%s" %
                  (channel, str(datetime.datetime.now() - trendilization_begin_timestamp)))

    return JsonResponse(res, safe=False)


def query_selection_trend_by_channel(request):
    """Return trend-line data for a single channel of the selected tests."""
    starter_id = request.GET['starterId']
    dates = request.GET.getlist('dates[]')
    types = request.GET.getlist('types[]')
    channel = request.GET['channel']

    res = {}
    if dates and types:
        query = FD.objects.filter(
            pro_id=starter_id, day_time__in=dates, start_label__in=types).values('test_id')
        tests = [t['test_id'] for t in query]
        data = list(SD.objects.filter(pro_id=starter_id,
                                      test_id__in=tests).values(channel))
        # Safe parsing of the stored list strings (replaces eval()).
        res[channel] = [ast.literal_eval(v[channel]) if v[channel] else None
                        for v in data]
        res[channel] = trendilization(res[channel])
    return JsonResponse(res, safe=False)
    

def query_dataset_trend(request):
    """Return the precomputed channel analysis for one start type as raw JSON."""
    pro_id = int(request.GET['starterId'])
    label = int(request.GET['type'])
    record = ChannelAnalysis.objects.get(pro_id=pro_id, start_label=label)
    # channels_data is already a JSON string, so send it back verbatim.
    return HttpResponse(record.channels_data, content_type="application/json")

def query_types_by_dates(request):
    """List the distinct start types occurring on the given dates."""
    pro_id = int(request.GET['starterId'])
    selected_dates = request.GET.getlist("dates[]")
    rows = FD.objects.filter(
        pro_id=pro_id, day_time__in=selected_dates).values("start_label").distinct()

    return JsonResponse([row['start_label'] for row in rows], safe=False)

def test_detail(request):
    """Return the sampled data of every channel for one test run.

    Channels are no longer fetched one by one: all GOOD_CHANNELS are read in a
    single query and the frontend decides what to display.

    Returns: JsonResponse
        {"test_id": <id>,
         "channel_data": [{"channel": <name>, "data": [...]}, ...]}
    """
    starter_id = int(request.GET['starterId'])
    test_id = int(request.GET['testId'])

    start_of_fetch_database = datetime.datetime.now()
    data = list(SD.objects.filter(pro_id=starter_id,
                                  test_id=test_id).values(*GOOD_CHANNELS))
    print("testid=%s取数据用时%s" %
          (test_id, str(datetime.datetime.now() - start_of_fetch_database)))

    # Guard: unknown test id (the original raised IndexError on data[0]).
    if not data:
        return JsonResponse({"test_id": test_id, "channel_data": []}, safe=False)

    start_of_eval = datetime.datetime.now()
    record = data[0]
    # Only keep channels that actually hold data.  ast.literal_eval parses
    # the stored "[...]" strings without eval()'s code-execution risk.
    data_dict = {channel: ast.literal_eval(record[channel])
                 for channel in GOOD_CHANNELS if record[channel]}
    print("多个channel的eval耗时%s" % (str(datetime.datetime.now() - start_of_eval)))

    # Truncate every channel to the shortest series so they line up.
    if data_dict:
        min_len = min(len(series) for series in data_dict.values())
        dataset = [{'channel': name, 'data': series[:min_len]}
                   for name, series in data_dict.items()]
    else:
        dataset = []

    return JsonResponse({"test_id": test_id, "channel_data": dataset}, safe=False)


def feature_coord_v2(request):
    """Build parallel-coordinates data for the selected features.

    Params:
        starterId: test set id
        attrs[]:   feature names to plot (one axis per feature)
        types[]:   selected start types
        dates[]:   selected dates

    Returns: JsonResponse
        {"schemes": [axis descriptors],
         "types": [{"type": <label>, "data": [[v, ...], ...]}, ...]}
    """
    starter_id = int(request.GET['starterId'])
    attrs = request.GET.getlist('attrs[]')
    types = [int(x) for x in request.GET.getlist('types[]')]
    dates = request.GET.getlist('dates[]')

    # Fetch test_id -> start_label in one query.  The original issued an
    # SD.objects.get per feature record (a classic N+1 query problem).
    selection = SD.objects.filter(
        pro_id=starter_id, start_label__in=types, day_time__in=dates
    ).values('test_id', 'start_label')
    label_by_test = {row['test_id']: row['start_label'] for row in selection}
    test_ids = list(label_by_test)

    records = FD.objects.values(
        "test_id", *attrs).filter(test_id__in=test_ids, pro_id=starter_id)
    schemes = [{"name": x, "index": i, "text": x} for i, x in enumerate(attrs)]

    abnormal_type_datas = [{"type": x, "data": []} for x in types]
    for record in records:
        data = [record[x] for x in attrs]
        abnormal_type = label_by_test[record['test_id']]
        for abnormal_type_data in abnormal_type_datas:
            if abnormal_type_data['type'] == abnormal_type:
                abnormal_type_data['data'].append(data)

    # Downsample every non-"-1" type to at most ~500 points.
    for abnormal_type_data in abnormal_type_datas:
        if int(abnormal_type_data['type']) != -1:
            limit_total = 500
            points = abnormal_type_data['data']
            if len(points) > limit_total:
                step = int(len(points) / limit_total)
                # Equivalent to keeping every index with i % step == 0.
                abnormal_type_data['data'] = points[::step]

    return JsonResponse({"schemes": schemes, "types": abnormal_type_datas}, safe=False)


def feature_trend(request):
    """Return trend data for the requested features over the whole test set.

    For each feature in `attrs` the response holds:
      - points:    a 1/250-downsampled, smoothed series of
                   {test_id, value, date}
      - centipede: per-chunk dispersion stats (cv + rolling-mean skew)
                   over fixed-size chunks of the full series
      - codomain:  [min, max] of the sampled values
    """
    starter_id = request.GET['starterId']
    attrs = request.GET['attrs'].split(",")

    fd_query = FD.objects.values(
        "test_id", *attrs).filter(pro_id=int(starter_id))
    fd_query_length = len(fd_query)

    # Downsample to every 250th record, then smooth.
    attrs_list = smooth_curve(list(fd_query[::250]))

    # Fetch all needed start dates in one query instead of one
    # SD.objects.get per sampled item (N+1 query problem in the original).
    sampled_ids = [int(item['test_id']) for item in attrs_list]
    date_by_test = {
        row['test_id']: row['day_time']
        for row in SD.objects.filter(
            pro_id=int(starter_id), test_id__in=sampled_ids
        ).values('test_id', 'day_time')}

    attributes_data = {attr_name: {'points': []} for attr_name in attrs}
    for item in attrs_list:
        for attr_name in attrs:
            attributes_data[attr_name]['points'].append({
                'test_id': item['test_id'],
                'value': item[attr_name],
                "date": date_by_test[int(item['test_id'])],
            })

    # Fixed chunk size for the "centipede" dispersion statistics.  The
    # original first computed fd_query_length // (fd_query_length // 1000)
    # — a ZeroDivisionError whenever the set holds fewer than 1000 records —
    # and then discarded that result by hard-coding 2000 anyway.
    chunk_size = 2000
    for attr_name in attrs:
        attributes_data[attr_name]['centipede'] = []
        chunk_index = 0
        while chunk_index * chunk_size < fd_query_length:
            lo = chunk_index * chunk_size
            hi = min(lo + chunk_size, fd_query_length)  # half-open interval
            chunk = list(fd_query[lo:hi])
            start_testid = int(chunk[0]['test_id'])
            sect_data = [x[attr_name] for x in chunk]
            if sect_data:
                # Coefficient of variation plus the skew of a rolling mean.
                cv = np.std(sect_data) / np.mean(sect_data)
                smoothed = pd.DataFrame(sect_data).rolling(
                    window=100, min_periods=1, center=True).mean()
                skew = smoothed.skew()
                attributes_data[attr_name]['centipede'].append({
                    "cv": cv,
                    "skew": skew[0],
                    "test_id": start_testid,
                })
            chunk_index += 1

    # Value range of the sampled series, per feature.
    for attr_name in attrs:
        values = [x[attr_name] for x in attrs_list]
        attributes_data[attr_name]['codomain'] = [min(values), max(values)]

    return JsonResponse({'attributes_data': attributes_data}, safe=False)

def feature_raw_line(request):
    """Return the raw feature line and abnormal labels for a test_id range.

    Params:
        starterId: test set id
        attr:      feature column name
        extent[]:  [low, high] inclusive test_id bounds
    """
    starter_id = request.GET['starterId']
    attr = request.GET['attr']
    extent = request.GET.getlist("extent[]")

    # Plain for-loops over .iterator() replace the original
    # while True / next() / StopIteration construction.
    points = []
    for row in FD.objects.filter(test_id__gte=extent[0],
                                 test_id__lte=extent[1],
                                 pro_id=starter_id).iterator():
        points.append({"test_id": row.test_id, "value": getattr(row, attr)})

    abnormals = []
    for row in SD.objects.filter(test_id__gte=extent[0],
                                 test_id__lte=extent[1],
                                 pro_id=starter_id).iterator():
        abnormals.append(
            {"test_id": row.test_id, "abnormal_type": row.start_label})

    return JsonResponse({"points": points, "abnormals": abnormals}, safe=False)


def feature_selected_trend(request):
    """Return the (downsampled) values of one feature for the selection.

    At most ~500 points are returned; larger selections are thinned by a
    fixed stride.
    """
    starter_id = request.GET['starterId']
    attr = request.GET['attr']
    types = [int(x) for x in request.GET.getlist('types[]')]
    dates = request.GET.getlist('dates[]')

    sd_query = SD.objects.values("test_id").filter(pro_id=starter_id,
                                                   start_label__in=types,
                                                   day_time__in=dates)
    sd_query_len = len(sd_query)
    TOTAL_LIMIT = 500
    if sd_query_len > TOTAL_LIMIT:
        sd_query = sd_query[::int(sd_query_len / TOTAL_LIMIT)]
    test_ids = [x["test_id"] for x in sd_query]

    # Guard: an empty selection previously crashed on min()/max().
    if not test_ids:
        return JsonResponse({"feature_name": attr,
                             "test_id_range": [],
                             "feature_data": []}, safe=False)

    fd_query = FD.objects.values("test_id", attr).filter(pro_id=starter_id,
                                                         test_id__in=test_ids)
    feature_data = [{"test_id": x['test_id'], "value": x[attr]}
                    for x in fd_query]

    return JsonResponse({"feature_name": attr,
                         "test_id_range": [min(test_ids), max(test_ids)],
                         "feature_data": feature_data}, safe=False)

def feature_single(request):
    """Fetch the feature values for one or more test ids."""
    pro_id = request.GET['starterId']
    wanted_ids = [int(x) for x in request.GET.getlist("test_ids[]")]
    rows = FD.objects.filter(
        test_id__in=wanted_ids, pro_id=pro_id).values("test_id", *GOOD_FEATURES)

    return JsonResponse(list(rows), safe=False)




def feature_list(request):
    """Expose the configured feature names to the frontend."""
    payload = {"features": FEATURES}
    return JsonResponse(payload, safe=False)


def channel_list(request):
    """Expose the configured channel names to the frontend."""
    payload = {"channels": CHANNELS}
    return JsonResponse(payload, safe=False)
