import time
import umsgpack  # 数据序列化用到的包
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.generic import View
from django_redis import get_redis_connection
from apps.article import models
from apps.wx_offical_account.models import WxOfficalAccount
from utils.tools import myTools
# Create your views here.
from django.conf import settings
from django.utils import timezone
# 导入自定义优先级队列的对象
from utils.redis_queue.priority_redis_query import Priority_query

class SpiderShowView(View):
    """Serve the spider control/dashboard page."""

    def get(self, request):
        """Render the spider page template for a GET request."""
        template_name = 'spider_page.html'
        return render(request, template_name)


# 自动抓取列表页数据
class SpiderAutoView(View):
    """Crawl the article-list pages of every eligible WeChat official account.

    GET flow:
      1. Load accounts that are active (``state=0``), not deleted and have a
         ``fake_id``.
      2. Draw a cookie/token pair from the Redis cookie pool; abort with an
         error payload if the pool is empty.
      3. For each account not crawled within the last day, fetch its list
         page; a rejected cookie is removed from the pool and a fresh one is
         drawn before retrying.
    Returns a JSON response containing the number of article URLs currently
    queued in Redis under ``settings.ARTICLE_URL_QUEUE_NAME``.
    """

    def get(self, request):
        redis_conn = get_redis_connection()
        current_user_agent = request.META.get("HTTP_USER_AGENT")
        # 1. Accounts that may be crawled (active, not deleted, fake_id set).
        wx_official_list = WxOfficalAccount.objects.filter(is_delete=0, state=0, fake_id__isnull=False)
        # 2. Draw one cookie/token pair from the Redis cookie pool.
        cookie_dict = myTools.random_headers(settings.COOKIES_POOLS_NAME)
        if cookie_dict is None:
            return JsonResponse({"status": False, "message": "cookie池为空,无法采集公众号", "err_code": 7})
        # 3. Crawl each eligible account, retrying until a cookie succeeds.
        for index in wx_official_list:
            # Skip accounts whose last crawl was less than one day ago.
            spider_interval = (timezone.now() - index.spider_time).days
            if spider_interval < 1:
                print("*"*10,"公众号:\t",index.org_name, spider_interval,"天前有抓取过","\t最近抓取时间：\t", index.spider_time,"\t当前时间",timezone.now())
                continue
            while True:
                print("当前抓取",index.org_name)
                response_data = myTools.crawl_org_list(fakeid=index.fake_id, token=cookie_dict['token'],
                                                       cookies_str=cookie_dict['cookie'],
                                                       useragent=current_user_agent)
                # A non-ok response means the cookie has expired: retire it
                # from the pool and draw a FRESH one before retrying.
                # (BUGFIX: the previous code retried with the same dead
                # cookie_dict forever, spinning in an infinite loop.)
                if response_data['base_resp'].get("err_msg", None) != "ok":
                    myTools.del_cookies(settings.COOKIES_POOLS_NAME, cookie_dict['user_name'])
                    cookie_dict = myTools.random_headers(settings.COOKIES_POOLS_NAME)
                    if cookie_dict is None:  # pool exhausted mid-run — stop cleanly
                        return JsonResponse({"status": False, "message": "cookie池为空,无法采集公众号", "err_code": 7})
                    continue
                myTools.parse_list_data(index, current_user_agent, **response_data)  # parse the list page and persist it
                print(index.article_type, index.user_for, index.org_name)
                break  # this account was crawled successfully
        # Report how many article URLs are now queued in Redis.
        link_count = redis_conn.zcard(settings.ARTICLE_URL_QUEUE_NAME)
        return JsonResponse({"status": True, "message": "抓取成功", "err_code": 0, "link_count": link_count})

# 监控后端抓取状态
class ShowStatusView(View):
    """Expose the current backlog of the article-URL queue in Redis."""

    def get(self, request):
        # Pause one second so Redis reflects the latest crawl activity
        # before we sample the queue size.
        time.sleep(1)
        conn = get_redis_connection()
        pending_urls = conn.zcard(settings.ARTICLE_URL_QUEUE_NAME)
        return HttpResponse(pending_urls)
