import random
from datetime import datetime as dt
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.permissions import AllowAny
from django.utils.datastructures import OrderedSet

import os
import json
import math
from tqdm import tqdm
from collections import defaultdict
from transformers import BertTokenizer


class NewsDataLoader:
    """Loads news articles from a JSONL file and builds a TF-IDF postings index.

    On construction this reads the stop-word list, parses every line of the
    JSONL file into ``self.cache`` (one dict per article, in file order),
    loads a BERT tokenizer, and builds ``self.postings``: a mapping
    ``term -> {doc_id: weight}`` used by the search view for ranking.
    """

    def __init__(self, read_path) -> None:
        # One dict per news article, in file order.
        self.cache = []

        with open("stop_words.txt", "r", encoding='utf-8') as f:
            self.stop_words = [line.strip() for line in f]

        with open(read_path, 'r') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Skip blank lines; json.loads("") would raise.
                    continue
                self.cache.append(json.loads(line))

        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        self.get_postings()
        self.doc_nums = len(self.cache)

    def get_postings(self):
        """Build (or load from disk cache) the term -> {doc_id: weight} map.

        The per-(term, doc) weight is the log-scaled term frequency
        ``1 + log(tf)``, scaled per document by ``1 / sqrt(sum of weights)``.
        NOTE(review): classic cosine normalization divides by
        sqrt(sum of *squared* weights); this divides by sqrt(sum of weights).
        Kept as-is so scores stay consistent with any existing postings.json.
        """
        self.postings = defaultdict(dict)
        # Reuse the cached index if a previous run already built it.
        if os.path.exists("postings.json"):
            with open("postings.json", "r", encoding='utf-8') as f:
                self.postings = json.load(f)
            return

        for news in tqdm(self.cache, ncols=100):
            tokens = self.tokenizer.tokenize(news["content"])

            # Raw term frequencies for this document.
            weights = defaultdict(int)
            for term in tokens:
                weights[term] += 1
            # Log-scaled tf: 1 + log(tf).
            for term in weights:
                weights[term] = math.log(weights[term]) + 1

            # Normalize. Skip documents with no tokens instead of dividing
            # by zero (the original crashed with ZeroDivisionError here).
            total = sum(weights.values())
            if total == 0:
                continue
            scale = 1.0 / math.sqrt(total)
            for term in weights:
                weights[term] *= scale

            # weights' keys are exactly the document's unique terms.
            for term, w in weights.items():
                self.postings[term][news["id"]] = w

        with open("postings.json", "w", encoding='utf-8') as f:
            json.dump(self.postings, f)

    

# Maximum number of words kept in a truncated ("short") article preview.
short_length = 100
# URL prefix for an article's detail page; the article id is appended.
detailedPageRoute = "/profile/basic?id="
# Module-level singleton: loads all news and builds the TF-IDF index at
# import time (blocking; path is relative to the process working directory).
news_loader = NewsDataLoader("../../all_news.jsonl")


# Create your views here.
# Create your views here.
class ListNews(APIView):
    """Search endpoint: returns a paginated, filtered, TF-IDF-ranked news list."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Pagination parameters; default to 0 so a missing/empty parameter
        # does not raise ValueError (the original crashed on int("")).
        page_size = int(request.query_params.get("count", "0") or 0)
        offSet = int(request.query_params.get("offSet", "0") or 0)
        # Flags arrive as strings; bool("false") is truthy, so compare
        # against the literal "true" instead (original bug for `short`).
        short = str(request.query_params.get("short", "false")).lower() == "true"
        categories = str(request.query_params.get("categories", ""))
        sources = str(request.query_params.get("sources", ""))
        time_range = str(request.query_params.get("time", ""))
        query = str(request.query_params.get("query", ""))
        is_boolean = request.query_params.get("is_boolean", "false") == "true"

        if not is_boolean:
            # Tokenize, dedupe, and drop stop words before ranking.
            terms = set(news_loader.tokenizer.tokenize(query))
            query_terms = [t for t in terms if t not in news_loader.stop_words]
            cand_list = self.tfidf_score_rank(query_terms)
        else:  # TODO: boolean retrieval not implemented yet
            cand_list = news_loader.cache

        # Expected format: "YYYY/MM/DD-YYYY/MM/DD".
        time_filter = []
        if time_range != "":
            time_filter = [dt.strptime(x, "%Y/%m/%d") for x in time_range.split("-")]

        if categories:
            # Exact match against the comma-separated list; the original
            # substring test (e.g. "art" in "cartoon") gave false positives.
            wanted = set(categories.split(","))
            cand_list = [n for n in cand_list if n["category"] in wanted]
        if sources:
            wanted = set(sources.split(","))
            cand_list = [n for n in cand_list if n["source"] in wanted]
        if len(time_filter) == 2:  # require both endpoints before indexing them
            start, end = time_filter
            cand_list = [n for n in cand_list
                         if start <= dt.strptime(n["time"], "%Y-%m-%d") <= end]

        cont_place = False
        if offSet >= len(cand_list):
            full_return_list = []
        elif offSet + page_size >= len(cand_list):
            # Last page: take everything that is left. The original sliced
            # [offSet:-1], which silently dropped the final article.
            full_return_list = cand_list[offSet:]
        else:
            full_return_list = cand_list[offSet: offSet + page_size]
            cont_place = True  # more results remain after this page

        # Attach the detail-page link to every returned article.
        for new in full_return_list:
            new["href"] = detailedPageRoute + str(new["id"])

        if short:
            # Truncate content to the first `short_length` words for list views.
            return_list = []
            for news in full_return_list:
                short_news = news.copy()
                words = news["content"].split()
                short_news["content"] = " ".join(words[:short_length]) + " ..."
                return_list.append(short_news)
        else:
            return_list = full_return_list

        return Response({"list": return_list, "cont": cont_place}, status=status.HTTP_200_OK)

    def tfidf_score_rank(self, query):
        """Rank cached news by TF-IDF similarity to the `query` term list.

        Returns the full cache (unranked) when the query is empty.
        """
        if not query:
            return news_loader.cache

        doc_nums = news_loader.doc_nums

        # Query-side term frequencies.
        t_num = defaultdict(int)
        for te in query:
            t_num[te] += 1

        # Query-side weights: (1 + log tf) * idf per distinct term.
        weights = {}
        for te, tf in t_num.items():
            if te in news_loader.postings:
                # Document frequency: how many documents contain this term.
                d_fre = len(news_loader.postings[te])
            else:
                d_fre = doc_nums  # idf = log(1) = 0, so the term scores 0
            weights[te] = (math.log(tf) + 1) * math.log(doc_nums / d_fre)

        # Accumulate per-document dot products. Iterate *distinct* terms:
        # the original looped over the raw list and double-counted any
        # duplicated query term. Numeric accumulator, not defaultdict(dict).
        score_tid = defaultdict(float)
        for te in set(query):
            if te in news_loader.postings:
                for tid, w in news_loader.postings[te].items():
                    score_tid[tid] += w * weights[te]

        similarity = sorted(score_tid.items(), key=lambda x: x[1], reverse=True)
        # NOTE(review): assumes news "id" equals its index in the cache — verify.
        return [news_loader.cache[int(tid)] for tid, _ in similarity]


class ProfileNews(APIView):
    """Detail endpoint: returns one news article by its id (cache index)."""
    permission_classes = [AllowAny]

    def get(self, request):
        # `id` indexes directly into the loader cache; reject bad values
        # with a 404 instead of letting ValueError/IndexError produce a 500.
        try:
            news_id = int(request.query_params.get("id", 0))
            new = news_loader.cache[news_id]
        except (ValueError, IndexError):
            return Response({"news": None}, status=status.HTTP_404_NOT_FOUND)
        return Response({"news": new}, status=status.HTTP_200_OK)


class NewsCategory(APIView):
    """Returns the distinct categories present in the loaded news cache."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Set comprehension dedupes; list() for JSON serialization.
        distinct = {news["category"] for news in news_loader.cache}
        return Response({"list": list(distinct)}, status=status.HTTP_200_OK)


class NewsSource(APIView):
    """Returns the distinct sources present in the loaded news cache."""
    permission_classes = [AllowAny]

    def get(self, request):
        # Set comprehension dedupes; list() for JSON serialization.
        distinct = {news["source"] for news in news_loader.cache}
        return Response({"list": list(distinct)}, status=status.HTTP_200_OK)