import time, random, json
import hashlib
import oss2
import requests
from urllib.parse import quote
import umsgpack
from django.conf import settings
from django.utils import timezone  # django时间设置
from django_redis import get_redis_connection

from apps.article.models import ArticleModel, ArticleImgsModel
from apps.wx_offical_account.models import WxOfficalAccount
from apps.spider_account.models import WechatUserModel
from utils.redis_queue.priority_redis_query import Priority_query


def time_to_unix(unix):
    """Convert a "%Y-%m-%d %H:%M:%S" time string to a Unix timestamp (float).

    Interprets the string in the local timezone (``time.mktime`` semantics).
    """
    return time.mktime(time.strptime(unix, "%Y-%m-%d %H:%M:%S"))


def unix_to_time(timestamp):
    """Convert a Unix timestamp to a local-time "%Y-%m-%d %H:%M:%S" string.

    Example output shape: "2016-05-05 20:28:54".
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))


def md5(string):
    """Return the hex MD5 digest of *string*, or False when it is not a str.

    NOTE: callers rely on the False return for non-str input (e.g. a None
    cover url), so no exception is raised here.
    """
    if not isinstance(string, str):
        return False
    return hashlib.md5(string.encode("utf-8")).hexdigest()


def upload_to_oss(old_link, useragent):
    """Download the image at *old_link* and upload it to Aliyun OSS.

    old_link: source image url (also used to derive a stable md5 file name).
    useragent: user-agent header to present when downloading the image.

    Returns the public OSS url on success, or None on any failure
    (best-effort contract, unchanged from the original).
    """
    # SECURITY(review): credentials are hard-coded; move them to Django
    # settings / environment variables and rotate the exposed key pair.
    try:
        logo_name = md5(old_link) + ".jpg"
        response = requests.get(old_link, headers={"user-agent": useragent}, timeout=30)
        response.raise_for_status()  # don't upload an HTML error page as an image
        auth = oss2.Auth('LTAI4FjEyJMxz6WC6L3AbsUW', '1JJdHkbwTIkYgwqF0pJuoMigi5cGUH')
        bucket = oss2.Bucket(auth, 'oss-cn-shenzhen.aliyuncs.com', '51huoke')
        # BUGFIX: upload the image bytes — the original passed the whole
        # requests.Response object to put_object.
        bucket.put_object("image/wx_official_logo/" + logo_name, response.content)
        logo_oss_name = "https://51huoke.oss-cn-shenzhen.aliyuncs.com/image/wx_official_logo/" + logo_name
        return logo_oss_name
    except Exception:
        print("**********  upload images error! *************")
        return None


def del_cookies(rand_keys_name, user):
    """Delete a user's cached cookie from redis and clear its MySQL credentials.

    rand_keys_name: redis hash key that stores per-user cookie payloads.
    user: wechat_code identifying the spider account.
    """
    redis_connect = get_redis_connection()
    try:
        redis_connect.hdel(rand_keys_name, user)
    except Exception:  # narrowed from bare except; redis failure must not abort the DB cleanup
        print("*********** delete cookie error ! *****************")
    finally:
        redis_connect.close()
    # Single query instead of exists() followed by first() (the original hit
    # the database twice for the same filter).
    account = WechatUserModel.objects.filter(wechat_code=user).first()
    if account is not None:
        account.token = None
        account.cookies = None
        account.save()


def random_headers(rand_keys_name):
    """Pick one cached account at random from the redis hash *rand_keys_name*.

    Returns a dict with keys "user_name", "cookie" and "token",
    or None when the hash holds no entries.
    """
    conn = get_redis_connection()
    user_names = [raw.decode() for raw in conn.hkeys(rand_keys_name)]
    if not user_names:  # nothing cached in redis yet
        return None
    chosen = random.choice(user_names)
    payload = json.loads(conn.hget(rand_keys_name, chosen))
    return {
        "user_name": chosen,
        "cookie": payload['Cookie'],
        "token": payload['token'],
    }


def crawl_org_list(fakeid, token, useragent, cookies_str, timeout=30):
    """Fetch the article-list JSON page of a WeChat official account.

    fakeid: account identifier string (url-encoded before use).
    token / cookies_str: credentials of a logged-in mp.weixin.qq.com session.
    useragent: browser user-agent matching the session cookies.
    timeout: seconds before the HTTP request aborts (new parameter with a
        default, so existing callers are unaffected; the original could
        hang forever).

    Returns the decoded JSON dict on HTTP 200, otherwise None.
    Raises TypeError when fakeid is not a str (was a bare Exception).
    """
    if not isinstance(fakeid, str):
        raise TypeError("传入的公众号名字必须是字符串类型的")
    url_encode_str = quote(fakeid, 'utf-8')

    urls = "https://mp.weixin.qq.com/cgi-bin/appmsg?action=list_ex&fakeid={fakeid}&query=&" \
           "begin=0&count=4&type=9&need_author_name=1&token={token}&lang=zh_CN&f=json&ajax=1".format(
        fakeid=url_encode_str, token=token)
    headers = {
        'authority': "mp.weixin.qq.com",
        'method': "GET",
        'path': "/cgi-bin/appmsg?action=list_ex&fakeid={fake_id}&query=&begin=0&count"
                "=4&type=9&need_author_name=1&token={token}&lang=zh_CN&f=json&ajax=1".format(
            fake_id=url_encode_str, token=token),
        'scheme': "https",
        'accept': "*/*",
        'accept-encoding': "gzip, deflate, br",
        'accept-language': "zh,en;q=0.9,zh-CN;q=0.8",
        'cookie': cookies_str,
        'referer': "https://mp.weixin.qq.com/cgi-bin/appmsg?"
                   "t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&token={tokens}&lang=zh_CN".format(
            tokens=token),
        'user-agent': useragent,
        'x-requested-with': "XMLHttpRequest",
    }
    response = requests.get(url=urls, headers=headers, timeout=timeout)
    if response.status_code == 200:
        return response.json()
    else:
        return None


def parse_list_data(orgs, useragent, **data):
    """Process one page of a WeChat official-account article list.

    orgs: the WxOfficalAccount-like model instance being crawled (read for
        spider_priority / org_name; its spider_time is stamped at the end).
    useragent: user-agent forwarded to the cover-image upload.
    data: must contain 'app_msg_list', the decoded list payload from
        crawl_org_list().

    For each entry:
    - dedupe the url against the redis url pool; stop at the first duplicate
      (NOTE(review): `break`, not `continue` — presumably the rest of the
      page is assumed already crawled; confirm this is intended);
    - enqueue new urls on the priority crawl queue;
    - persist the article and its cover-image record to MySQL.
    """
    query = Priority_query(query_name=settings.ARTICLE_URL_QUEUE_NAME)
    redis_conn = get_redis_connection()  # hoisted: was re-fetched every iteration
    for index in data['app_msg_list']:
        author_name = index.get("author_name", None)
        link = index.get("link", None)
        md5_link = md5(link)
        digest = index.get("digest", None)
        title = index.get("title", None)
        update_time = index.get("update_time", None)
        create_time = index.get("create_time", None)
        cover = index.get("cover", None)

        # sadd returns 0 when the url is already in the dedupe pool.
        redis_set_status = redis_conn.sadd(settings.ALL_ARTICLE_URL_POOLS_NAME, link)
        if redis_set_status == 0:
            print("*" * 10, "article was already exists in redis !", "*" * 10, ":", link)
            break
        # Unseen url: schedule it for crawling.
        query.put_nowait((orgs.spider_priority, link))

        # Already stored in MySQL: skip to the next entry.
        if ArticleModel.objects.filter(article_link=link.strip()).exists():
            print("*" * 10, "article was already exists in mysql databases !", "*" * 10, ":", link)
            continue

        # BUGFIX: the original crossed these — update_time fed article_create
        # and create_time fed article_update.
        article_create = unix_to_time(int(create_time)) if create_time else timezone.now()
        article_update = unix_to_time(int(update_time)) if update_time else timezone.now()

        # Persist the article (create() already saves the row).
        article = ArticleModel.objects.create(
            article_author=author_name,
            article_link=link,
            article_desc=digest,
            title=title,
            org_id=orgs,
            article_create_on=article_create,
            article_update_on=article_update,
            md5_link=md5_link,
            cover_img_link=cover if cover else "default:" + str(timezone.now()),
            md5_cover_link=md5(cover),
            org_name=orgs.org_name
        )
        # Mirror the cover image to OSS and record the outcome
        # (update_state 1 = uploaded, 2 = upload failed).
        if cover:
            new_link = upload_to_oss(cover, useragent)
            ArticleImgsModel.objects.create(
                article=article,
                img_oss_name=new_link,
                img_oss_url=new_link,
                img_wechat_url=cover,
                update_state=1 if new_link else 2,
                is_covers=1
            )

    # Stamp the account with this crawl's time.
    orgs.spider_time = timezone.now()
    orgs.save()

def spider_available(fakeid):
    org = WxOfficalAccount.objects.filter(fake_id=fakeid).exists()
    if not org:
        return False
    # 判断这个爬虫最近有没有抓取过
