import json
from django.http import HttpResponse
from django.shortcuts import render
from django.views.generic.base import View
from elasticsearch import Elasticsearch
from datetime import datetime
import redis
from searchApp.models import WuYictoIndex,TodayJobIndex,GuokrIndex
import re
from urllib import parse

client = Elasticsearch(hosts=["127.0.0.1"])
redis_cli = redis.StrictRedis(decode_responses=True)


class IndexView(View):
    """Landing page: shows the five hottest search keywords."""

    def get(self, request):
        # All members of the sorted set, scores from +inf down to -inf,
        # highest score first; keep only the top five.
        hot_keywords = redis_cli.zrevrangebyscore(
            "search_keywords_set", "+inf", "-inf", start=0, num=5
        )
        return render(request, "index.html", {"topn_search": hot_keywords})


class SearchSuggest(View):
    """Search-box autocompletion: returns a JSON list of suggestion strings.

    Query params:
        s      -- (possibly URL-encoded) keyword prefix typed so far
        s_type -- which corpus to suggest from: "article", "question" or "job";
                  anything else yields an empty list.
    """

    # s_type -> (doc-type class, fuzziness, _source field surfaced to the user).
    # Fuzziness values differ per corpus in the original code; preserved here.
    _SUGGEST_CONF = {
        "article": (WuYictoIndex, 2, "title"),
        "question": (GuokrIndex, 3, "tags"),
        "job": (TodayJobIndex, 3, "title"),
    }

    def get(self, request):
        key_words = parse.unquote(request.GET.get("s", ""))
        s_type = request.GET.get("s_type", "")
        re_datas = []
        conf = self._SUGGEST_CONF.get(s_type)
        if key_words and conf:
            index_cls, fuzziness, field = conf
            s = index_cls.search().suggest("my_suggest", key_words, completion={
                "field": "suggest",
                "fuzzy": {
                    "fuzziness": fuzziness,  # max edit distance for fuzzy matching
                    "prefix_length": 2,      # leading chars that must match exactly
                },
                "size": 10,
            })
            suggestions = s.execute_suggest()
            for match in suggestions.my_suggest[0].options:
                re_datas.append(match._source[field])
        return HttpResponse(json.dumps(re_datas), content_type="application/json")


class SearchView(View):
    """Full-text search results page backed by Elasticsearch.

    Query params:
        q      -- search keywords (possibly URL-encoded)
        s_type -- "article", "question" or "job"; any other value falls
                  back to the job index (matches the original else-branch)
        p      -- 1-based page number, 10 hits per page
    """

    # Per-corpus configuration: ES index name, multi_match fields, fields to
    # highlight, and which _source fields feed each template slot.
    _SEARCH_CONF = {
        "article": {
            "index": "51cto",
            "query_fields": ["tags", "title", "content"],
            "highlight_fields": ["title", "content"],
            "content_field": "content",
            "time_field": "create_time",
            "origin_field": "origin",
            "origin_url_field": "origin_url",
        },
        "question": {
            "index": "guokr",
            "query_fields": ["tags", "title", "answer_content"],
            "highlight_fields": ["title", "answer_content"],
            "content_field": "answer_content",
            "time_field": "create_time",
            "origin_field": "origin",
            "origin_url_field": "origin_url",
        },
        "job": {
            "index": "today_job",
            "query_fields": ["title", "job_desc", "company_name"],
            "highlight_fields": ["title", "job_desc", "company_name"],
            "content_field": "job_desc",
            "time_field": "publish_time",
            "origin_field": "company_name",
            "origin_url_field": "url",
        },
    }

    @staticmethod
    def _build_body(conf, key_words, page):
        """Build the Elasticsearch query body: multi_match + paging + highlight."""
        return {
            "query": {
                "multi_match": {
                    "query": key_words,
                    "fields": conf["query_fields"],
                }
            },
            "from": (page - 1) * 10,
            "size": 10,
            "highlight": {
                "pre_tags": ['<span class="keyWord">'],
                "post_tags": ['</span>'],
                "fields": {name: {} for name in conf["highlight_fields"]},
            },
        }

    @staticmethod
    def _hit_to_dict(hit, conf):
        """Flatten one ES hit into the dict the template expects.

        Prefers highlighted fragments where present, falling back to the raw
        _source value. Returns None for malformed documents (missing fields)
        so the caller can skip them instead of crashing the whole page.
        """
        source = hit["_source"]
        # FIX: hits with no matches in highlighted fields carry no "highlight"
        # key at all; the old code raised KeyError and silently dropped them.
        highlight = hit.get("highlight", {})
        content_field = conf["content_field"]
        try:
            if "title" in highlight:
                title = "".join(highlight["title"])
            else:
                title = source["title"]
            if content_field in highlight:
                content = "".join(highlight[content_field])[:500]
            else:
                content = source[content_field][:500]
            return {
                "title": title,
                "content": content,
                "create_time": source[conf["time_field"]].split("T")[0],
                "url": source["url"],
                "origin": source[conf["origin_field"]],
                "origin_url": source[conf["origin_url_field"]],
                "score": hit["_score"],
            }
        except (KeyError, TypeError):
            return None

    def get(self, request):
        """Run the search, record keyword popularity, render result.html."""
        key_words = parse.unquote(request.GET.get("q", "").strip())
        s_type = request.GET.get("s_type", "")

        # Bump this keyword's score in the hot-search sorted set.
        redis_cli.zincrby("search_keywords_set", 1, key_words)
        # Top-5 hottest keywords, highest score first.
        topn_search = redis_cli.zrevrangebyscore(
            "search_keywords_set", "+inf", "-inf", start=0, num=5
        )

        try:
            page = int(request.GET.get("p", "1"))
        except (ValueError, TypeError):  # was a bare except: — narrowed
            page = 1

        # Per-corpus document counts maintained elsewhere (by the crawlers,
        # presumably — not visible from this file).
        wuyicto_count = redis_cli.get("51cto_count")
        today_job_count = redis_cli.get("today_job_count")
        Guokr_question_count = redis_cli.get("Guokr_question_count")

        # Unknown s_type falls back to the job index, as the original did.
        conf = self._SEARCH_CONF.get(s_type, self._SEARCH_CONF["job"])

        start_time = datetime.now()
        response = client.search(
            index=conf["index"],
            body=self._build_body(conf, key_words, page),
        )
        # Wall-clock seconds spent on the ES round-trip.
        last_seconds = (datetime.now() - start_time).total_seconds()

        hit_list = []
        for hit in response["hits"]["hits"]:
            hit_dict = self._hit_to_dict(hit, conf)
            if hit_dict is not None:
                hit_list.append(hit_dict)

        # NOTE(review): assumes Elasticsearch < 7, where hits.total is a
        # plain int (in ES 7+ it is a dict) — confirm against the cluster.
        total_nums = response["hits"]["total"]
        # BUGFIX: the page count must be derived from total_nums; the old code
        # tested `page % 10`, giving a wrong count whenever the *current page*
        # happened to be a multiple of 10.
        page_nums = int(total_nums / 10) + (1 if total_nums % 10 > 0 else 0)

        return render(request, "result.html", {"page": page,
                                               "all_hits": hit_list,
                                               "key_words": key_words,
                                               "total_nums": total_nums,
                                               "page_nums": page_nums,
                                               "last_seconds": last_seconds,
                                               "51cto_count": wuyicto_count,
                                               "today_job_count": today_job_count,
                                               "Guokr_question_count": Guokr_question_count,
                                               "s_type": s_type,
                                               "topn_search": topn_search})
