import random
from datetime import datetime as dt
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import AllowAny
from django.utils.datastructures import OrderedSet

import os
import json
import math
from tqdm import tqdm
from collections import defaultdict
from transformers import BertTokenizer

from elasticsearch import Elasticsearch
import collections

from luqum.elasticsearch import ElasticsearchQueryBuilder
from luqum.parser import parser

class NewsDataLoader:
    """Load the news corpus, stop words, and a BERT tokenizer, and build
    (or restore from ``postings.json``) a ``term -> {doc_id: weight}``
    postings index used for tf-idf ranking and boolean retrieval.
    """

    def __init__(self, read_path: str) -> None:
        # Full corpus: one parsed JSON object per line of the input file.
        self.cache = []

        with open("stop_words.txt", "r", encoding='utf-8') as f:
            self.stop_words = [line.strip() for line in f.readlines()]

        with open(read_path, 'r') as f:
            for line in f:
                line = line.strip()
                # Guard: a blank line would make json.loads raise.
                if line:
                    self.cache.append(json.loads(line))

        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        self.get_postings()
        # Total number of documents (used for idf and for NOT queries).
        self.doc_nums = len(self.cache)

    def get_postings(self):
        """Build ``self.postings``: term -> {doc_id: normalized log-tf weight}.

        The index is cached on disk in ``postings.json``; when that file
        exists it is loaded instead of being rebuilt.
        """
        self.postings = defaultdict(dict)
        if os.path.exists("postings.json"):
            with open("postings.json", "r", encoding='utf-8') as f:
                self.postings = json.load(f)
            return

        for news in tqdm(self.cache, ncols=100):
            tokens = self.tokenizer.tokenize(news["content"])

            # Sub-linear tf weighting: 1 + log(raw count) per term.
            weights = {term: math.log(count) + 1
                       for term, count in collections.Counter(tokens).items()}

            # NOTE(review): this normalizes by sqrt(sum of weights), not the
            # usual Euclidean norm sqrt(sum of squared weights).  Kept as-is
            # because existing postings.json caches were built this way.
            total = sum(weights.values())
            if total > 0:
                scale = 1.0 / math.sqrt(total)
                for term in weights:
                    weights[term] *= scale
            else:
                # Empty document: report it instead of dividing by zero
                # (the original crashed with ZeroDivisionError here).
                print(tokens)

            for term, weight in weights.items():
                self.postings[term][news["id"]] = weight

        with open("postings.json", "w", encoding='utf-8') as f:
            json.dump(self.postings, f)

    

# Number of words kept when returning shortened ("short") article bodies.
short_length = 100
# Route prefix used to build each returned item's detail-page link.
detailedPageRoute = "/profile/basic?id="
# Module-level singleton: loads the corpus and postings index at import time.
news_loader = NewsDataLoader("../../all_news.jsonl")


# Create your views here.
class ListNews(APIView):
    '''
    Search endpoint.  Returns one page of news matching the request, using one
    of three retrieval modes (tf-idf "semantic" ranking, hand-written boolean
    retrieval, or Elasticsearch boolean search), with optional category /
    source / time filtering and body shortening.
    '''

    # Placeholder for [UNK] query sub-tokens.  A bracketed string can never be
    # produced by the wordpiece tokenizer, so this term is guaranteed to match
    # no document.  (The original used a real English word, which could
    # accidentally match documents containing it.)
    _UNMATCHABLE = "[unmatchable-term]"

    def handwritten_boolean(self, query: str, tokenizer: BertTokenizer):
        """Evaluate a boolean query (AND / OR / NOT, parentheses) against the
        in-memory postings index and return the matching news dicts."""

        def _op(operator, left_operand: set, right_operand=None):
            # Apply one boolean operator to sets of document-id strings.
            assert operator in ["AND", "OR", "NOT"], "Operator should be in ['AND', 'OR', 'NOT'], but get {}!!!".format(operator)
            if operator == "AND":
                assert right_operand is not None, "The right operand should not be None when the operator is AND!!!"
                return left_operand.intersection(right_operand)
            elif operator == 'OR':
                assert right_operand is not None, "The right operand should not be None when the operator is OR!!!"
                return left_operand.union(right_operand)
            else:  # NOT: complement within the whole corpus
                return all_docs - left_operand

        def _reduce_top(operator_stack, operand_stack):
            # Pop one operator plus its operand(s) and push the result.
            op = operator_stack.pop()
            left_operand = operand_stack.pop()
            right_operand = operand_stack.pop() if op != "NOT" else None
            operand_stack.append(_op(op, left_operand, right_operand))

        def _parse_query(query_units: list):
            # Shunting-yard style evaluation over the flat token stream.
            operator_stack = []
            operand_stack = []

            for unit in query_units:
                if unit in ["AND", "OR", "NOT", "(", ")"]:  # operators
                    cur_prece = precedence[unit]
                    # BUGFIX: the stack precedence must come from the operator
                    # on top of the stack, not from the current unit (the
                    # original compared cur_prece against itself).
                    stack_prece = -100 if len(operator_stack) == 0 else precedence[operator_stack[-1]]
                    if cur_prece > stack_prece:
                        operator_stack.append(unit)
                    elif unit == ")":
                        # Reduce until the matching "(" and discard it.
                        while len(operator_stack) and operator_stack[-1] != "(":
                            _reduce_top(operator_stack, operand_stack)
                        operator_stack.pop()  # popout "("
                    else:
                        # Reduce everything of higher-or-equal precedence,
                        # then push the current operator.
                        while len(operator_stack) and operator_stack[-1] != "(" and cur_prece <= precedence[operator_stack[-1]]:
                            _reduce_top(operator_stack, operand_stack)
                        operator_stack.append(unit)
                else:  # operand: the set of doc ids containing this term
                    try:
                        operand_stack.append(set(news_loader.postings[unit].keys()))
                    except KeyError:
                        operand_stack.append(set())

            while len(operator_stack):
                _reduce_top(operator_stack, operand_stack)

            assert len(operand_stack) == 1, "Operand_stack should contain only one element after processing, but length is {}".format(len(operand_stack))
            return operand_stack[-1]

        # Operator precedences.  "(" always enters the stack; ")" never does.
        precedence = {}
        precedence['NOT'] = 3
        precedence['AND'] = 2
        precedence['OR'] = 1
        precedence['('] = 100
        precedence[')'] = -100

        all_docs = set([str(x) for x in range(news_loader.doc_nums)])
        # Surround parentheses with spaces so str.split() isolates them.
        query = "( ".join(" )".join(query.split(")")).split("("))
        query_units = []
        for unit in query.split():
            if unit in ["AND", "OR", "NOT", "(", ")"]:
                query_units.append(unit)
                continue
            sub_tokens = tokenizer.tokenize(unit)
            if not sub_tokens:
                # Guard: tokenizing e.g. pure punctuation can yield nothing
                # (the original raised IndexError here).
                continue
            # BUGFIX: map [UNK] to the unmatchable sentinel in BOTH branches
            # (the original only replaced it in the multi-token branch).
            sub_tokens = [t if t != '[UNK]' else self._UNMATCHABLE for t in sub_tokens]
            if len(sub_tokens) > 1:
                # A word split into several wordpieces must match all pieces.
                query_units.append("(")
                query_units.append(sub_tokens[0])
                for token in sub_tokens[1:]:
                    query_units.append("AND")
                    query_units.append(token)
                query_units.append(")")
            else:
                query_units.append(sub_tokens[0])

        cand_ids = _parse_query(query_units)
        return [news_loader.cache[int(x)] for x in cand_ids]

    def tfidf_score_rank(self, query):
        """Rank documents by tf-idf score against ``query`` (a list of terms).

        An empty query returns the whole corpus unranked.
        """
        if len(query) == 0:
            return news_loader.cache

        doc_nums = news_loader.doc_nums

        # Query-side weight per term: (1 + log tf) * idf.
        t_num = dict(collections.Counter(query))
        for te in t_num.keys():
            if te in news_loader.postings:
                d_fre = len(news_loader.postings[te])  # document frequency
            else:
                d_fre = doc_nums  # idf becomes log(1) == 0, so term scores 0
            t_num[te] = (math.log(t_num[te]) + 1) * math.log(doc_nums / d_fre)

        # Accumulate per-document scores.  BUGFIX: iterate the unique terms,
        # not the raw query list, so duplicated query terms are not counted
        # twice (their multiplicity is already in the tf factor above).  Also
        # use defaultdict(float): the original used defaultdict(dict) for
        # float scores.
        score_tid = defaultdict(float)
        for te in t_num:
            if te in news_loader.postings:
                for tid, weight in news_loader.postings[te].items():
                    score_tid[tid] += weight * t_num[te]  # tf*idf

        similarity = sorted(score_tid.items(), key=lambda x: x[1], reverse=True)
        return [news_loader.cache[int(x[0])] for x in similarity]

    def es_bool_search(self, query, offSet, page_size, time_range, categories, sources):
        """Run a boolean/filtered search against the local Elasticsearch
        "news" index and return the raw ES response (including highlights).

        The query is assembled as a Lucene query string and translated to the
        ES query DSL via luqum.
        """
        es = Elasticsearch("http://localhost:9200")

        clauses = []
        if time_range != "":
            # time_range format: "YYYY/MM/DD-YYYY/MM/DD"
            time_filter = [dt.strptime(x, "%Y/%m/%d") for x in time_range.split("-")]
            clauses.append('time:[' + str(time_filter[0].date()) + ' TO ' + str(time_filter[1].date()) + ']')
        if len(categories) > 0:
            clauses.append('category:(' + ' OR '.join(categories) + ')')
        if len(sources) > 0:
            clauses.append('source:(' + ' OR '.join(sources) + ')')
        if query != "":
            clauses.append('content:(' + query + ')')
        es_query = ' AND '.join(clauses)

        tree = parser.parse(es_query)
        query_f = ElasticsearchQueryBuilder()(tree)
        print(query_f)
        body = {"query": query_f, "highlight": {"fields": {"content": {}}}, "size": page_size, "from": offSet}
        return es.search(index="news", body=body)

    # =================================================================================
    permission_classes = [AllowAny]

    def get(self, request):
        """Parse paging/filter params, dispatch to the chosen retrieval mode,
        apply filters, paginate, and optionally shorten article bodies."""
        # Paging parameters.  Defaults keep a missing parameter from raising
        # ValueError (the original crashed on int("")).
        page_size = int(request.query_params.get("count", "10"))
        offSet = int(request.query_params.get("offSet", "0"))
        # BUGFIX: bool("false") is True; treat explicit false-y strings as off.
        short = str(request.query_params.get("short", "")) not in ("", "0", "false", "False")
        categories1 = str(request.query_params.get("categories", ""))
        categories = [x.strip() for x in categories1.split("+") if x != '']
        sources1 = str(request.query_params.get("sources", ""))
        sources = [x.strip() for x in sources1.split("+") if x != '']
        time_range = str(request.query_params.get("time", ""))
        query = str(request.query_params.get("query", ""))
        search_type = str(request.query_params.get("type", ""))

        if search_type == 'semantic' or query == '':
            # Deduplicate tokens, drop stop words, then tf-idf rank.
            query = list(filter(lambda x: x not in news_loader.stop_words, set(news_loader.tokenizer.tokenize(query))))
            cand_list = self.tfidf_score_rank(query)

        elif search_type == 'hand_bool':
            cand_list = self.handwritten_boolean(query, news_loader.tokenizer)

        else:  # Elasticsearch does filtering + paging itself; return early.
            es_list = self.es_bool_search(query, offSet, page_size, time_range, categories, sources)
            return_list = []
            for hit in es_list['hits']['hits']:
                if short and 'highlight' in hit:
                    # Prefer the ES highlight fragments as the snippet.
                    hit['_source']['content'] = " ... ".join(hit['highlight']['content'])
                elif short:
                    splitted_news = hit['_source']["content"].split()
                    hit['_source']["content"] = " ".join(splitted_news[0:min(short_length, len(splitted_news))]) + " ..."
                return_list.append(hit['_source'])
            re_news_num = es_list['hits']['total']['value']
            # cont tells the client whether another page exists.
            cont_place = offSet + page_size < re_news_num
            for new in return_list:
                new["href"] = detailedPageRoute + str(new["id"])
            return Response({"list": return_list, "cont": cont_place}, status=status.HTTP_200_OK)

        # Parse the "YYYY/MM/DD-YYYY/MM/DD" time range, if supplied.
        time_filter = []
        if time_range != "":
            time_filter = [dt.strptime(x, "%Y/%m/%d") for x in time_range.split("-")]
        cont_place = False

        # Apply category / source / time filters to the ranked candidates.
        if len(categories) > 0:
            cand_list = [x for x in cand_list if x["category"].strip() in categories]
        if len(sources) > 0:
            cand_list = [x for x in cand_list if x["source"].strip() in sources]
        if len(time_filter) > 0:
            cand_list = [x for x in cand_list
                         if time_filter[0] <= dt.strptime(x["time"], "%Y-%m-%d") <= time_filter[1]]

        # Paginate; cont_place stays False when this is the last page.
        if offSet >= len(cand_list):
            full_return_list = []
        elif offSet + page_size >= len(cand_list):
            full_return_list = cand_list[offSet:]
        else:
            full_return_list = cand_list[offSet: offSet + page_size]
            cont_place = True

        # Attach the detail-page link.  NOTE(review): this mutates the cached
        # news dicts in place, matching the original behavior.
        for new in full_return_list:
            new["href"] = detailedPageRoute + str(new["id"])

        if short:
            # Copy each item so truncation does not clobber the shared cache.
            return_list = []
            for news in full_return_list:
                short_news = news.copy()
                splitted_news = news["content"].split()
                short_news["content"] = " ".join(splitted_news[0:min(short_length, len(splitted_news))]) + " ..."
                return_list.append(short_news)
        else:
            return_list = full_return_list
        return Response({"list": return_list, "cont": cont_place}, status=status.HTTP_200_OK)


class ProfileNews(APIView):
    """Return the full record of a single news item looked up by numeric id."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Renamed from `id` to avoid shadowing the builtin.
        news_id = int(request.query_params.get("id", 0))
        # Guard against bad ids: the original raised IndexError (HTTP 500) on
        # out-of-range ids and silently indexed from the end on negatives.
        if not 0 <= news_id < len(news_loader.cache):
            return Response({"news": None}, status=status.HTTP_404_NOT_FOUND)
        return Response({"news": news_loader.cache[news_id]}, status=status.HTTP_200_OK)


class NewsCategory(APIView):
    """List every distinct news category present in the loaded corpus."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Deduplicate via a set comprehension; order is unspecified.
        distinct_categories = {item["category"] for item in news_loader.cache}
        return Response({"list": list(distinct_categories)}, status=status.HTTP_200_OK)


class NewsSource(APIView):
    """List every distinct news source present in the loaded corpus."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Deduplicate via a set comprehension; order is unspecified.
        distinct_sources = {item["source"] for item in news_loader.cache}
        return Response({"list": list(distinct_sources)}, status=status.HTTP_200_OK)