import copy

from django.shortcuts import render, redirect
import os
from datetime import datetime, timedelta, time
from web.models import Leak
from django.utils.safestring import mark_safe
from django.db.models import Count
from web.utils.getJsonList import get_json_list, merge_json


def load(request):
    """Wipe the Leak table and reload it from the bundled data files.

    Deleting first keeps repeated loads from duplicating rows; afterwards
    the user is sent back to the home page.
    """
    # Start from an empty table.
    Leak.objects.all().delete()
    # Local import: the loader is only needed on this admin-style endpoint.
    from web.utils.load import load
    load(f"{os.getcwd()}/web/static/data")
    # Back to the home page once the import is done.
    return redirect("/")


def _select_options_html(values, selected):
    """Build ``<option>`` HTML for a dropdown.

    Always starts with one empty option; falsy values (``None``/``""``) are
    skipped so only real choices are offered; *selected* gets the
    ``selected`` attribute.
    """
    html = "<option></option>"
    for value in values:
        if not value:
            continue
        if value == selected:
            html += f"<option selected>{value}</option>"
        else:
            html += f"<option>{value}</option>"
    return html


def _pie_year_data(rows, field):
    """Fold ``values(field).annotate(num=Count("id"))`` rows into per-year
    ECharts pie entries ``{"name": year, "value": count}``, sorted by count
    descending.

    Bug fix: each row's ``num`` (leaks sharing one exact date) is summed
    into its year; previously every distinct date contributed just 1, so
    the chart showed "distinct dates per year" rather than leaks per year.
    """
    per_year = {}
    for row in rows:
        year = str(row[field].year)
        per_year[year] = per_year.get(year, 0) + row["num"]
    data = [{"name": year, "value": count} for year, count in per_year.items()]
    return sorted(data, key=lambda item: item["value"], reverse=True)


def list_view(request):
    """List view: filterable leak table plus four overview pie charts."""
    from web.utils.pagination import Pagination
    # User-selected filters from the query string.
    v_type_get = request.GET.get("v_type", "")
    severity_get = request.GET.get("severity", "")

    # Dynamic dropdowns built from the distinct values present in the DB.
    v_type_select = _select_options_html(
        (d["v_type"] for d in Leak.objects.values("v_type").distinct()),
        v_type_get)
    severity_select = _select_options_html(
        (d["severity"] for d in Leak.objects.values("severity").distinct()),
        severity_get)

    # Apply only the filters the user actually chose.
    search_dict = {}
    if severity_get:
        search_dict["severity"] = severity_get
    if v_type_get:
        search_dict["v_type"] = v_type_get
    qs = Leak.objects.filter(**search_dict)

    # Paginate the filtered queryset.
    page = Pagination(request, "page", qs, page_size=6,
                      first_switch=True, last_switch=True, a_next=True, a_prev=True)

    # Data for the four overview pie charts.
    pie_published_data = _pie_year_data(
        qs.values("published").annotate(num=Count("id")), "published")
    pie_modified_data = _pie_year_data(
        qs.values("modified").annotate(num=Count("id")), "modified")
    # Severity pie: falsy severity is bucketed as "未知" (unknown).
    pie_severity_data = sorted(
        ({"name": d["severity"] or "未知", "value": d["num"]}
         for d in qs.values("severity").annotate(num=Count("id"))),
        key=lambda item: item["value"], reverse=True)
    pie_v_type_data = sorted(
        ({"name": d["v_type"], "value": d["num"]}
         for d in qs.values("v_type").annotate(num=Count("id"))),
        key=lambda item: item["value"], reverse=True)

    response = {
        # Pre-rendered dropdown HTML.
        # NOTE(review): values are interpolated unescaped into HTML via
        # mark_safe — safe only if v_type/severity never contain markup;
        # consider escaping them.
        "v_type_select": mark_safe(v_type_select),
        "severity_select": mark_safe(severity_select),
        # Chart payloads (top 10 where the category set can be large).
        "pie_published": mark_safe(pie_published_data[:10]),
        "pie_modified": mark_safe(pie_modified_data[:10]),
        "pie_severity": mark_safe(pie_severity_data),
        "pie_v_type": mark_safe(pie_v_type_data[:10]),
        # Rows for the currently selected page.
        "leaks": page.page_queryset,
        # Pager HTML.
        "page_html": page.html(),
    }
    return render(request, "list.html", response)


def info(request, nid):
    """Detail page for one leak.

    ``nid`` arrives via the URLconf; the matching row is looked up in the
    database (``None`` if no row has that id).
    """
    context = {"leak": Leak.objects.filter(id=nid).first()}
    return render(request, "info.html", context)


def overlook(request):
    """Overview dashboard: stacked severity bars per year, a type/year
    bubble scatter, and a word cloud of leak names."""
    # Stacked bar chart: number of leaks of each severity level per year.
    year_severity_dict = {}
    for dic in Leak.objects.values('published', 'severity').order_by('published'):
        if not dic["severity"]:
            continue  # rows without a severity are not charted
        year = dic["published"].year
        # NOTE(review): assumes severity is always one of these four labels;
        # any other non-empty value would raise KeyError — confirm.
        counts = year_severity_dict.setdefault(
            year, {"低危": 0, "中危": 0, "高危": 0, "超危": 0})
        counts[dic["severity"]] += 1
    # Reshape into the parallel lists ECharts expects.
    line_x = []
    line_level1 = []  # 低危 (low)
    line_level2 = []  # 中危 (medium)
    line_level3 = []  # 高危 (high)
    line_level4 = []  # 超危 (critical)
    for year, counts in year_severity_dict.items():
        line_x.append(year)
        line_level1.append(counts['低危'])
        line_level2.append(counts['中危'])
        line_level3.append(counts['高危'])
        line_level4.append(counts['超危'])

    # Bubble scatter: number of each vulnerability type published per year.
    # Template of all known types, pre-zeroed so every year has every type.
    init_value = {dic["v_type"]: 0
                  for dic in Leak.objects.values('v_type').distinct().order_by('v_type')}
    year_v_type_dict = {}
    for dic in Leak.objects.values('published', 'v_type').order_by('published'):
        year = dic["published"].year
        # Values are plain ints, so a shallow copy of the template suffices.
        year_v_type_dict.setdefault(year, dict(init_value))[dic["v_type"]] += 1

    scatter_x = list(year_v_type_dict.keys())
    scatter_y = list(init_value.keys())
    scatter_data = []
    # ECharts wants [type-index, year-index, value] triples; enumerate keeps
    # this O(n) instead of repeated list.index() lookups.
    for year_idx, year in enumerate(scatter_x):
        v_type_counts = year_v_type_dict[year]
        for type_idx, v_type in enumerate(scatter_y):
            scatter_data.append([type_idx, year_idx, v_type_counts[v_type]])

    # Word-cloud data is precomputed offline and cached as a pickle, since
    # jieba segmentation over every leak name is too slow per request.
    import pickle
    """
    # Uncomment this block once to rebuild the cached word-cloud pickle.
    # Tokens to strip from the cloud
    ban = [
        "\\", "/", "-", ".", "_", " ", "多个", "和", "'", '"', "‘", "’"
    ]
    # Names of every leak
    word_cloud_data = [dic["name"] for dic in Leak.objects.values("name")]
    import jieba
    from collections import Counter
    # Segment, count, and shape for ECharts
    words = jieba.lcut(" ".join(word_cloud_data))
    word_cloud = [{"name": k, "value": v} for k, v in Counter(words).items() if k not in ban]
    with open(f'{os.getcwd()}/web/static/cloud.pickle', 'wb') as f:
        pickle.dump(word_cloud, f)
    """
    # Load the cached word cloud.
    with open(f'{os.getcwd()}/web/static/cloud.pickle', 'rb') as f:
        word_cloud = pickle.load(f)

    response = {
        # Word cloud (top 100 entries; more makes the page sluggish).
        "word_cloud": mark_safe(word_cloud[:100]),
        # Stacked bar chart data.
        "line_x": mark_safe(line_x),
        "line_level1": mark_safe(line_level1),
        "line_level2": mark_safe(line_level2),
        "line_level3": mark_safe(line_level3),
        "line_level4": mark_safe(line_level4),
        # Scatter chart data.
        "scatter_x": mark_safe(scatter_x),
        "scatter_y": mark_safe(scatter_y),
        "scatter_data": mark_safe(scatter_data),
    }
    return render(request, "overlook.html", response)


def chart(request):
    """Analysis dashboard: distinct-value counters and two progress pies.

    (An earlier, fully commented-out trend-line/stacked-bar implementation
    was removed as dead code — its ``response`` dict was shadowed by the
    live one below and never rendered.)
    """
    # Headline counters: number of distinct types / severity levels.
    num1 = Leak.objects.values("v_type").distinct().count()
    num2 = Leak.objects.values("severity").distinct().count()

    # Pie 1: leak count per severity level.
    # NOTE(review): aggregates on "v_id" here but "id" elsewhere — presumably
    # both identify a row; confirm against the Leak model.
    progress_legend_list = []
    progress_series_data = []
    for dic in Leak.objects.values("severity").annotate(count=Count("v_id")):
        if not dic["severity"]:
            # Bucket both "" and None as uncategorised (the original only
            # caught "", letting None leak through as a chart label).
            dic["severity"] = "未分类"
        progress_legend_list.append(dic["severity"])
        progress_series_data.append({"name": dic["severity"], "value": dic["count"]})

    # Pie 2: top-5 vulnerability types by leak count.
    progress_series_data2 = [
        {"name": dic["v_type"], "value": dic["count"]}
        for dic in Leak.objects.values("v_type").annotate(count=Count("v_id"))
    ]
    progress_series_data2.sort(key=lambda item: item["value"], reverse=True)
    progress_legend_list2 = [dic["name"] for dic in progress_series_data2[:5]]

    response = {
        "num1": num1,
        "num2": num2,
        "num3": Leak.objects.count(),
        "progress_pie": {
            "legend": progress_legend_list, "series": progress_series_data,
        },
        "progress_pie2": {
            "legend": progress_legend_list2, "series": progress_series_data2[:5],
        },
    }
    return render(request, "chart.html", response)


def predict(request):
    """Predict a resolution date for a leak of the chosen type and severity.

    Fits a linear regression over the historical time-to-fix (in days) of
    matching leaks, extrapolates one step, and offsets the requested publish
    date by that many days.
    """
    # Today's date, used as the default publish date.
    today = datetime.now().strftime("%Y-%m-%d")
    # Inputs from the query string.
    v_type_get = request.GET.get("v_type", "")
    severity_get = request.GET.get("severity", "")
    published_get = datetime.strptime(request.GET.get("published", today), "%Y-%m-%d")

    # Dynamic dropdowns (the type list gets a leading empty option, the
    # severity list does not — matching the template's expectations).
    type_option_html = "<option></option>"
    for item in (d["v_type"] for d in
                 Leak.objects.values("v_type").distinct().order_by("v_type")):
        attr = " selected" if item == v_type_get else ""
        type_option_html += f'<option{attr}>{item}</option>'
    severity_option_html = ""
    for item in (d["severity"] for d in
                 Leak.objects.values("severity").distinct().order_by("severity")):
        attr = " selected" if item == severity_get else ""
        severity_option_html += f'<option{attr}>{item}</option>'

    # Historical resolution times; +1 so same-day fixes count as one day.
    day_list = [
        (d["modified"] - d["published"]).days + 1
        for d in Leak.objects.filter(
            v_type=v_type_get, severity=severity_get).values("published", "modified")
    ]

    res_date = ""
    # Guard on day_list too: fitting with zero samples raised ValueError
    # in the original whenever no leaks matched the selected filters.
    if v_type_get and severity_get and day_list:
        from sklearn.linear_model import LinearRegression
        import numpy as np
        # x = sample index (1..n), y = days to fix; predict index n+1.
        train_x = np.arange(1, len(day_list) + 1).reshape(-1, 1)
        train_y = np.array(day_list).reshape(-1, 1)
        lr = LinearRegression()
        lr.fit(train_x, train_y)
        res = lr.predict(np.array([[len(day_list) + 1]]))[0][0]
        # NOTE(review): the +100-day offset is carried over from the original
        # code and looks like a fudge factor — confirm its intent.
        res = res + 100
        res_date = datetime.strftime(published_get + timedelta(days=res), "%Y年%m月%d日")

    response = {
        "type_option_html": mark_safe(type_option_html),
        "severity_option_html": mark_safe(severity_option_html),
        "published": datetime.strftime(published_get, "%Y-%m-%d"),
        "today": today,
        "res_date": res_date,
    }
    return render(request, "predict.html", response)
