import ast
import functools
import io
import json
import os
import os.path
import time
from datetime import date, datetime, timedelta

import pandas as pd
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db.models import Count, Q
from django.http import FileResponse, JsonResponse
from django.shortcuts import HttpResponse, get_object_or_404
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from rest_framework import filters, generics, permissions, status, viewsets
from rest_framework.decorators import action, api_view, permission_classes
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.serializers import DateField
from rest_framework.views import APIView
from rest_framework_simplejwt.authentication import JWTStatelessUserAuthentication
from rest_framework_simplejwt.exceptions import TokenError
from rest_framework_simplejwt.tokens import RefreshToken

from ai.admin import ArticleResource
from ai.models import Article
from ai_info_server.utils import get_keywords_id, merge_category_articles_by_name, parse_category
from init_data.import_data import ArticleImporter
from logger.logger_handler import Logger
from utils.word_cloud import create_word_cloud
from .models import AIExcelFile, ArticleCategory, ArticleKeyword, UserKeywordFocus, get_article_keywords_frequency
from .pagenation import MobileCursorPagination, StandardResultsSetPagination
from .serializers import ArticleCategorySerializer, ArticleKeywordSerializer, ArticleSerializer, \
    UserKeywordFocusCreatedSerializer, UserKeywordFocusSerializer, UserRegSerializer, UserSerializer

# Module-wide logger; the project Logger wrapper handles handler/format setup.
logger = Logger(logger='views.py').get_logger()

# (Legacy hand-rolled logging configuration removed; the Logger wrapper above
# replaces it.)

def timeit(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    Uses ``functools.wraps`` so the wrapped function keeps its ``__name__``
    (which the printed message relies on) and docstring, and
    ``time.perf_counter`` — the recommended clock for interval timing.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(f'{func.__name__} cost {time.perf_counter() - start}')
        return result

    return wrapper


# Shared module-level importer instance, used by upload_excel below.
articleExcelImporter = ArticleImporter()


class CategoryFilter(filters.SearchFilter):
    """Search filter that widens a `category__id` match to the whole subtree.

    When the request carries `category__id`, the queryset is restricted to
    articles belonging to that category or any of its descendants.
    """

    def filter_queryset(self, request, queryset, view):
        requested_id = request.query_params.get('category__id')
        if not requested_id:
            return queryset
        node = ArticleCategory.objects.filter(id=requested_id).first()
        if node is None:
            return queryset
        return queryset.filter(category__in=node.get_descendants(include_self=True))


class JWTMixin:
    """Mixin: stateless JWT authentication; anonymous users get read-only access."""

    authentication_classes = [JWTStatelessUserAuthentication, ]
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)


# Create your views here.
class ArticleViewSet(JWTMixin, viewsets.ModelViewSet):
    serializer_class = ArticleSerializer
    pagination_class = MobileCursorPagination

    # 这段代码会被多个中间件调用, 代码逻辑没什么问题，需要优化下性能，避免这种不必要的多次查询

    def get_queryset(self):
        """
        category 前端传递的分类查询标志，用于分类筛选
        重新定义是为了在将文章分类的子分类结果一并返回展示
        """
        category_id = self.request.query_params.get('category')
        queries = ~Q(risk="有")
        keywords = self.request.GET.getlist('keywords[]')
        if category_id:
            category = ArticleCategory.objects.filter(id=category_id).first()
            if category is not None:
                all_category = category.get_descendants(include_self=True)
                queries &= Q(category__in=all_category)

        if len(keywords):
            queries &= Q(keywords__in=keywords)

        queryset = Article.objects.filter(queries).distinct()

        return queryset

    @action(
        methods=["GET"], detail=False, url_path="archive/dates", url_name="archive-date"
    )
    def list_archive_dates(self, request, *args, **kwargs):
        dates = Article.objects.dates("create_at", "day", order="DESC")
        date_field = DateField()
        data = [date_field.to_representation(date) for date in dates]
        return Response(data=data, status=status.HTTP_200_OK)


def get_queryset_order_by_match_count(_search_keywords):
    """Return no-risk articles whose keyword text matches any search term,
    ordered by the number of matching keyword rows (most matches first)."""
    keyword_filter = Q()
    for term in _search_keywords:
        keyword_filter |= Q(keywords__text__icontains=term)
    keyword_filter &= Q(risk=Article.NO_RISK)

    return (
        Article.objects
        .prefetch_related("keywords")
        .filter(keyword_filter)
        .annotate(match_count=Count('keywords'))
        .order_by('-match_count')
    )


def get_queryset_for_raw_article_content(_search_keywords):
    """Return no-risk articles whose body contains every search term
    (case-insensitive substring match), newest publication first."""
    content_filter = Q()
    for term in _search_keywords:
        content_filter &= Q(content__icontains=term)
    content_filter &= Q(risk=Article.NO_RISK)
    return Article.objects.filter(content_filter).order_by('-public_time')

def count_keywords(article, _search_keywords):
    """Count how many search terms appear (case-insensitively) in the
    article's raw keyword string.  Each term contributes at most 1,
    no matter how often it occurs."""
    haystack = article.keywords_raw.lower()
    return sum(1 for term in _search_keywords if term.lower() in haystack)


def sorted_articles_queryset(_articles, _search_keywords):
    """Annotate each article with its keyword-match count and publish month,
    then sort by (month, match count, publish time), newest/highest first."""
    annotated = []
    for item in _articles:
        item.count = count_keywords(item, _search_keywords)
        item.month_range = item.public_time.strftime("%Y%m")
        annotated.append(item)

    annotated.sort(key=lambda a: (a.month_range, a.count, a.public_time), reverse=True)
    return annotated


class UserFocusEventArticleViewSet(JWTMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only articles whose content contains the user's focus search terms."""

    serializer_class = ArticleSerializer
    pagination_class = StandardResultsSetPagination
    ordering = None

    def get_queryset(self):
        """Filter article content by every `search_keywords[]` term."""
        terms = self.request.GET.getlist('search_keywords[]')
        return get_queryset_for_raw_article_content(terms)


class ArticleKeywordViewSet(JWTMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only keyword API, optionally scoped to a category subtree and a
    text search term."""

    queryset = ArticleKeyword.objects.filter(value__gt=0)
    pagination_class = StandardResultsSetPagination
    serializer_class = ArticleKeywordSerializer
    filter_backends = (filters.SearchFilter, filters.OrderingFilter)

    def get_queryset(self):
        """Build the keyword queryset for this request.

        When `category` is given, keywords are limited to those attached to
        no-risk articles inside that category's subtree; `search` narrows by
        keyword text.
        """
        keyword_qs = ArticleKeyword.objects.filter(value__gt=0)
        params = self.request.query_params
        category_id = params.get('category')
        search = params.get('search')

        if category_id:
            article_filter = Q(risk=Article.NO_RISK)
            category = ArticleCategory.objects.filter(id=category_id).first()
            if category is not None:
                article_filter &= Q(category__in=category.get_descendants(include_self=True))
            matching_articles = Article.objects.filter(article_filter).distinct()
            keyword_qs = keyword_qs.filter(article__in=matching_articles)

        if search:
            keyword_qs = keyword_qs.filter(text__contains=search)

        return keyword_qs.distinct()

    def get_serializer_context(self):
        """Expose the requested category id to the serializer."""
        ctx = super().get_serializer_context()
        ctx['category_id'] = self.request.query_params.get('category')
        return ctx


class ArticleKeywordOptionView(JWTMixin, generics.ListAPIView):
    """Unpaginated keyword option list, sorted by frequency (`value`).

    With a `search` term the full matching set is returned; without one the
    list is capped at the top 300 keywords.  The response also reports the
    number of distinct articles carrying the returned keywords.
    """

    permission_classes = (AllowAny,)
    pagination_class = None
    serializer_class = ArticleKeywordSerializer

    def get_queryset(self):
        """Build the keyword option queryset, optionally scoped by category/search."""
        queryset = ArticleKeyword.objects.filter(value__gt=0)
        category_id = self.request.query_params.get('category')
        search = self.request.query_params.get('search', None)

        queries = Q(risk=Article.NO_RISK)

        if category_id:
            category = ArticleCategory.objects.filter(id=category_id).first()
            if category is not None:
                all_category = category.get_descendants(include_self=True)
                queries &= Q(category__in=all_category)

            articles = Article.objects.filter(queries).distinct()
            queryset = queryset.filter(article__in=articles)

        # `search is not None` (PEP 8) replaces the original `not search is None`.
        if search is not None:
            queryset = queryset.filter(text__icontains=search).distinct().order_by('-value')
        else:
            # Without a search term, cap the option list at the 300 most frequent.
            queryset = queryset.distinct().order_by('-value')[:300]

        return queryset

    def list(self, request, *args, **kwargs):
        """Return keywords plus the distinct count of articles they cover."""
        queryset = self.filter_queryset(self.get_queryset())

        # pagination_class is None, so this branch is normally skipped; kept
        # for safety should pagination be re-enabled.
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)

        serializer = self.get_serializer(queryset, many=True)
        customer_data = {
            "data": serializer.data,
            "total_article_count": Article.objects.filter(keywords__in=queryset).distinct().count()
        }
        return Response(customer_data)

    def get_serializer_context(self):
        """Expose the requested category id to the serializer."""
        context = super().get_serializer_context()
        context['category_id'] = self.request.query_params.get('category')
        return context


class ArticleCategoryViewSet(JWTMixin, viewsets.ReadOnlyModelViewSet):
    # Read-only category API.  Only root categories (parent=None) are listed;
    # presumably the serializer nests children — confirm against
    # ArticleCategorySerializer.
    pagination_class = None
    queryset = ArticleCategory.objects.filter(parent=None)
    serializer_class = ArticleCategorySerializer


class UserKeywordFocusViewSet(viewsets.ModelViewSet):
    """CRUD for the authenticated user's followed keywords."""

    pagination_class = None
    serializer_class = UserKeywordFocusSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def get_queryset(self):
        """Only the requesting user's entries, most recently updated first."""
        return UserKeywordFocus.objects.filter(user=self.request.user).order_by("-updated_at")

    def get_serializer_class(self):
        """Pick the write-oriented serializer for create/update actions."""
        if self.action == "list":
            return UserKeywordFocusSerializer
        if self.action in ("create", "update"):
            return UserKeywordFocusCreatedSerializer
        return super().get_serializer_class()


# Generates a word-cloud image per category combination, cached for 5 minutes;
# `force_update` bypasses the cache.
def get_word_cloud_image(request):
    """Return a word-cloud PNG built from the keywords of the requested categories.

    Query params:
        categories   — list of ArticleCategory pks (descendants are included)
        force_update — any value forces regeneration even when cached

    Fixes vs. the previous version:
      * the cached path is checked BEFORE running the article/keyword queries,
        so a cache hit no longer pays for the full frequency computation;
      * the file handle is passed to FileResponse without a `with` block —
        FileResponse takes ownership and closes it after streaming, whereas
        the old `with open(...)` closed the file before the response was sent.
    """
    categories = request.GET.getlist('categories')
    force_update = request.GET.get('force_update')

    # Sort so the same category combination always yields the same cache key.
    categories.sort()
    file_names = []
    all_category = []
    for category in categories:
        instance = ArticleCategory.objects.filter(pk=category).first()
        if instance is not None:
            file_names.append(instance.name)
            all_category.extend(instance.get_descendants(include_self=True))

    wordcloud_image_name = '_'.join(file_names)

    cached_path = cache.get(wordcloud_image_name)
    if cached_path and force_update is None:
        return FileResponse(open(cached_path, 'rb'))

    queries = Q()
    for pk in set(all_category):
        queries |= Q(category__exact=pk)

    articles = Article.objects.filter(queries).distinct()
    all_keywords = articles.values_list('keywords', flat=True).distinct()
    word_data = get_article_keywords_frequency(all_keywords)

    file_path = os.path.join(settings.MEDIA_ROOT, 'images', f'{wordcloud_image_name}.png')
    create_word_cloud(word_data, file_path)
    cache.set(wordcloud_image_name, file_path, 5 * 60)
    return FileResponse(open(file_path, 'rb'))


@csrf_exempt
def upload_excel(request):
    """Accept an Excel upload (POST field "file"), store it under
    media/upload/excels/<YYYY_MM_DD>/ and import its articles.

    An AIExcelFile record is upserted per stored path; `sync_status` is set
    True only after the import succeeds.
    """
    if request.method != "POST":
        return HttpResponse("get successful")

    file = request.FILES['file']
    dynamic_path = datetime.today().strftime("%Y_%m_%d")

    # Relative path is what the model stores; the absolute path is where
    # the bytes land on disk.
    instance_file_path = os.path.join("upload", "excels", dynamic_path, file.name)
    file_path = os.path.join(settings.MEDIA_ROOT, instance_file_path)
    os.makedirs(os.path.dirname(file_path), exist_ok=True)

    with open(file_path, 'wb+') as fp:
        for chunk in file.chunks():
            fp.write(chunk)

    # Upsert the tracking record for this file path.
    try:
        real_instance = AIExcelFile.objects.get(file=instance_file_path)
        real_instance.file = instance_file_path
        real_instance.sync_status = False
        real_instance.save()
    except AIExcelFile.DoesNotExist:
        real_instance = AIExcelFile(file=instance_file_path)
        real_instance.save()

    try:
        articleExcelImporter.import_data(file_name=real_instance.file)
    except Exception:
        # Previously only ImportError was caught (a module-import error, which
        # import_data is unlikely to raise); catch any import failure here and
        # log it instead of silently dropping the traceback.
        logger.exception("import excel data failed: %s", real_instance.file)
        return HttpResponse("upload by POST successful, but import data error!")

    real_instance.sync_status = True
    real_instance.save()
    return HttpResponse("upload by POST successful and import data!")


@csrf_exempt
def set_risk(request):
    """Mark an article (looked up by exact `url`) as risky or not.

    POST body: JSON ``{"url": ..., "risk": <truthy/falsy>}``.
    Any failure (bad JSON, unknown URL, save error) is reported as a
    code-500 JSON payload rather than a raised exception.
    """
    if request.method != "POST":
        # The old code fell through and returned None on non-POST requests,
        # which makes Django raise a server error; answer explicitly instead.
        return JsonResponse({
            "error": True,
            "code": 405,
            "message": "POST required"
        }, status=405)

    try:
        json_data = json.loads(request.body.decode("utf-8"))
        url = json_data.get("url")
        risk = json_data.get("risk")
        risk_article = get_object_or_404(Article, url=url)
        if risk is not None:
            risk_article.risk = Article.HAS_RISK if risk else Article.NO_RISK
            risk_article.save()
            return JsonResponse({
                "success": True,
                "code": 200,
                "message": "ok"
            })
        else:
            return JsonResponse({
                "error": True,
                "code": 500,
                "message": "参数错误"
            })

    except Exception as e:
        # NOTE: this also converts the Http404 from get_object_or_404 into a
        # code-500 JSON body, matching the original behavior.
        return JsonResponse({
            "error": True,
            "code": 500,
            "message": f"happen error {e}"
        })


def export_today_data(request):
    """Export today's risky articles as an Excel (.xlsx) attachment.

    The workbook is assembled in memory (io.BytesIO) instead of writing a
    temporary file into the process working directory and deleting it — the
    old approach raced between concurrent requests and leaked the file on
    failure.
    """
    today = date.today()
    dynamic_path = today.strftime("%Y_%m_%d")
    excel_file_name = f'风险文章_{dynamic_path}.xlsx'

    queryset = Article.objects.filter(create_at__date=today, risk=Article.HAS_RISK)
    dataset = ArticleResource().export(queryset)

    # Export rows into a DataFrame, then serialize to an in-memory workbook.
    dataframe = pd.DataFrame(dataset.dict)
    buffer = io.BytesIO()
    dataframe.to_excel(buffer, index=False, sheet_name='Sheet1')

    response = HttpResponse(buffer.getvalue(), content_type="application/vnd.ms-excel")
    # utf-8 → latin-1 round-trip keeps the non-ASCII filename legal in the header.
    response['Content-Disposition'] = f'attachment; filename="{excel_file_name.encode("utf-8").decode("ISO-8859-1")}"'

    return response


@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_profile(request):
    """Return the authenticated user's serialized profile."""
    serializer = UserSerializer(request.user, many=False)
    return Response(serializer.data)


@api_view(['POST'])
@permission_classes([AllowAny, ])
def create_article_by_analysis(request):
    """Import (create or update) a single article pushed by the analysis model.

    The JSON body carries the article fields; an existing article is matched
    by exact URL.  On update, the `risk` flag is deliberately NOT overwritten
    because Feishu writes its review result back to that field.

    Returns a JSON payload with code 200 on success, code 500 on any failure.

    TODO: add internal-IP access restriction — this endpoint is currently open.
    """
    try:
        # https://docs.djangoproject.com/en/4.2/ref/request-response/#django.http.QueryDict
        post_data = json.loads(request.body.decode("utf-8"))

        title = post_data.get("title")
        url = post_data.get("url")
        thumb = post_data.get("thumb")

        # Parse the naive publish time and attach the active timezone.
        public_naive_time = datetime.strptime(post_data.get("public_time"), "%Y-%m-%d %H:%M")
        public_time = timezone.make_aware(public_naive_time)

        content = post_data.get("content")
        abstract = post_data.get("abstract")
        keywords_raw = post_data.get("keywords_raw")
        keyword_contents = post_data.get("keyword_contents")
        categories_raw = post_data.get("categories_raw")
        author = post_data.get("author")
        article_source = post_data.get("article_source")

        # NOTE(review): a missing "risk" key also maps to HAS_RISK here,
        # matching the original behavior — confirm that is intended.
        risk = Article.NO_RISK if post_data.get("risk") == 0 else Article.HAS_RISK

        article = Article.objects.filter(url__exact=url).first()
        if article is not None:
            article.title = title
            article.url = url
            article.thumb = thumb
            article.public_time = public_time
            article.content = content
            article.abstract = abstract
            article.keywords_raw = keywords_raw
            article.keyword_contents = keyword_contents
            article.categories_raw = categories_raw
            # `risk` intentionally untouched on update (Feishu feedback wins).
            article.author = author
            article.article_source = article_source
        else:
            article = Article.objects.create(
                title=title,
                url=url,
                thumb=thumb,
                public_time=public_time,
                content=content,
                abstract=abstract,
                keywords_raw=keywords_raw,
                keyword_contents=keyword_contents,
                categories_raw=categories_raw,
                risk=risk,
                author=author,
                article_source=article_source,
            )
        article.save()

        article.keywords.set(get_keywords_id(keywords_raw))
        parse_category(categories_raw, article)

        # Refresh the per-keyword article counts for every keyword now attached.
        for keyword in set(article.keywords.all()):
            keyword.value = keyword.article_set.count()
            keyword.save()
        logger.info(f"文章导入: {title}")

        return JsonResponse({
            "success": True,
            "code": 200,
            "message": "ok"
        })
    except Exception as e:
        logger.error(f"文章导入失败:  {e}")

        return JsonResponse({
            "error": True,
            "code": 500,
            "message": f"happen error {e}"
        })


class RegisterView(generics.CreateAPIView):
    # Open (AllowAny) user-registration endpoint; validation lives in
    # UserRegSerializer.
    queryset = User.objects.all()
    permission_classes = (AllowAny,)
    serializer_class = UserRegSerializer


@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def update_profile(request):
    """Partially update the authenticated user's profile.

    Returns the updated profile on success, or the validation errors with
    HTTP 400 — previously an invalid payload was silently ignored and a
    200 with stale data was returned.
    """
    user = request.user
    serializer = UserSerializer(user, data=request.data, partial=True)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    serializer.save()
    return Response(serializer.data)


class BlacklistRefreshView(APIView):
    """Blacklist the presented refresh token (used on token rotation)."""

    def post(self, request):
        """Blacklist `refresh` from the request body.

        A malformed or expired token previously raised TokenError out of the
        view and produced a 500; report it as a 400 instead.
        """
        try:
            token = RefreshToken(request.data.get('refresh'))
        except TokenError as exc:
            return Response(str(exc), status=400)
        token.blacklist()
        return Response("Success", status=200)


@api_view(['GET'])
@permission_classes([AllowAny, ])
def article_analysis_by_keyword(request):
    """Return the last 30 days of articles carrying the given keyword,
    optionally restricted to one category.

    Query params:
        keyword_id  — required; unknown/missing ids yield an empty list
        category_id — optional category filter (exact category, no subtree)
    """
    keyword_id = request.query_params.get("keyword_id")
    if keyword_id is None:
        return Response([])

    keyword = ArticleKeyword.objects.filter(id=keyword_id).first()
    if keyword is None:
        return Response([])

    category = None
    category_id = request.query_params.get("category_id")
    if category_id is not None:
        # NOTE: an unknown category_id leaves `category` as None and filters
        # on category=None, matching the original behavior.
        category = ArticleCategory.objects.filter(id=category_id).first()

    end_date = date.today()
    start_date = end_date - timedelta(days=29)
    queries = Q(public_time__range=[start_date, end_date])
    if category_id is not None:
        queries &= Q(category=category)

    articles = keyword.article_set.filter(queries)
    # Serialize the instances directly; the previous `data=`/`is_valid()`
    # round-trip misused the serializer (instances go as the first positional
    # argument, and no validation pass is involved).
    serializer = ArticleSerializer(articles, many=True)
    return Response(serializer.data)


def merge_category_view(request):
    """Merge articles from the `source` category into `target` (matched by name)."""
    target_category = request.GET.get('target')
    source_category = request.GET.get('source')
    merge_category_articles_by_name(target_category, source_category)
    # HttpResponse(200) would send the literal body "200"; return an empty
    # 200 response instead.
    return HttpResponse(status=200)
