import json
import time
import requests
import scrapy
from kuaishou_pro import settings
import redis
from kuaishou_pro.items import KuaishouProItem
import os
import pymysql

def get_key():
    """Pop one merchant record from the Redis crawl queue.

    The queued value is a JSON string shaped like
    ``{"value": "<profile url>", "m_id": "<merchant id>"}``.

    Returns:
        tuple: ``(m_id, key)`` where ``key`` is the profile id taken from
        the last path segment of the URL (query string stripped).
        Returns ``(0, '')`` when the queue is empty or on failure.

    Side effects:
        Marks the merchant's crawl status as 1 (running) in Redis.
        On failure the popped value is pushed back so it is not lost.
    """
    r = redis.Redis(host=settings.REDIS_HOST, password=settings.REDIS_PASSWORD,
                    port=settings.REDIS_PORT, db=settings.REDIS_DB)
    m_id = 0
    key = ''
    rs_key = 'merchant:kuaishou'
    url_obj = None  # pre-bind so the except-handler can't hit a NameError
    try:
        url_obj = r.rpop(rs_key)
        print('获取redis信息--kuaishou')
        print(url_obj)
        if url_obj:
            obj = json.loads(url_obj)
            home_url = obj['value']
            m_id = obj['m_id']
            # Profile key = last path segment of the URL, query string dropped.
            key = (home_url.split('?')[0]).rsplit('/', 1)[1]
            # 设置商户快手爬虫状态 -- mark merchant's kuaishou crawl as running
            r.set('kuaishou:status:' + str(m_id), 1)
    except Exception:
        # Re-queue the record only if we actually popped one; the original
        # bare except could push None back (or NameError if rpop raised).
        if url_obj is not None:
            r.lpush(rs_key, url_obj)
    return m_id, key


class KuaishouSpider(scrapy.Spider):
    """Crawl one Kuaishou user's video feed through the graphql API.

    The merchant to crawl (``m_id`` / profile ``key``) is popped from Redis
    at import time via ``get_key()``. ``parse`` pages through the user's
    feed, schedules a download for every video not already stored in
    ``ufutx_anchor_videos``, then flips the merchant's Redis status to
    2 (finished). ``parse_video``/``parse_pic`` save the video and its
    cover image under ``../spider_files/<account>/`` and finally yield a
    :class:`KuaishouProItem` for the pipeline.
    """
    name = 'kuaishou'
    allowed_domains = []
    start_urls = ['https://www.kuaishou.com']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
        'Cookie': 'didv=1602727908758; kpf=PC_WEB; kpn=KUAISHOU_VISION; clientid=3; did=web_50b8c84bbfcabce8bca085229dc76e7f; client_key=65890b29; userId=2607692425; kuaishou.server.web_st=ChZrdWFpc2hvdS5zZXJ2ZXIud2ViLnN0EqABodey7F8tQJDh6hKgVSu98XyzMOlM9G4fz4rI51c24WpRF8rUKk1ajW6PkPZ7tj9QGYHljW6WLHwNeXpLJvvqPxj0JRHYUPKkj5qmy4IpDvRXAKiFsob0Q0MoLxXK6RFzHmu3AOSZfEL_u2Vi-HSIUr4te42XQknSOLBvy_bimsQ1IUTJ8DLF3TTIrTpAQk9RM-8qrfE7zC8-NunmAM0V9xoS6NMArymx9EWAk7l-bjDNUah-IiAicJEi2yWAMHkc3L_dVgUL10FeCOXF5snL-S9cR8804SgFMAE; kuaishou.server.web_ph=36ad86bc803cbe875b98f7b62db3dfe78131',
        'Content-Type': 'application/json',
    }
    # NOTE(review): runs at import time -- pops the next merchant from Redis.
    m_id, key = get_key()
    pcursor = '0'

    # NOTE(review): the connection is opened at class-definition time and
    # closed at the end of parse(), so one process can only run one crawl;
    # consider moving this to spider open/close hooks.
    conn = pymysql.connect(host=settings.DB_HOST, database=settings.DB_DATABASE, user=settings.DB_USER,
                           password=settings.DB_PASSWORD)
    cursor = conn.cursor()

    def parse(self, response):
        """Page through visionProfilePhotoList and schedule downloads.

        Talks to the graphql endpoint with ``requests.post`` (not scrapy)
        so the session cookie in ``self.headers`` is sent verbatim. Yields
        one ``scrapy.Request`` per video not yet in the database, plus a
        re-crawl request while more pages remain.
        """
        url = 'https://www.kuaishou.com/graphql'
        pcursor = self.pcursor

        while True:
            try:
                if not self.key:
                    # Nothing was queued in Redis -- stop immediately.
                    break
                payload = json.dumps({
                    "operationName": "visionProfilePhotoList",
                    "variables": {"userId": self.key, "pcursor": pcursor, "page": "profile"},
                    "query": "query visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      type\n      author {\n        id\n        name\n        following\n        headerUrl\n        headerUrls {\n          cdn\n          url\n          __typename\n        }\n        __typename\n      }\n      tags {\n        type\n        name\n        __typename\n      }\n      photo {\n        id\n        duration\n        caption\n        likeCount\n        realLikeCount\n        coverUrl\n        coverUrls {\n          cdn\n          url\n          __typename\n        }\n        photoUrls {\n          cdn\n          url\n          __typename\n        }\n        photoUrl\n        liked\n        timestamp\n        expTag\n        animatedCoverUrl\n        stereoType\n        videoRatio\n        profileUserTopPhoto\n        __typename\n      }\n      canAddComment\n      currentPcursor\n      llsid\n      status\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n"})
                # Renamed from `response` -- the original shadowed the
                # scrapy Response parameter.
                api_response = requests.post(url, headers=self.headers, data=payload)
                obj = json.loads(api_response.text)
                feeds = obj['data']['visionProfilePhotoList']['feeds']
                for feed in feeds:
                    item = KuaishouProItem()
                    item['title'] = feed['photo']['caption']
                    item['aweme_id'] = feed['photo']['id']
                    item['account'] = feed['author']['name']
                    item['cover'] = feed['photo']['coverUrl']
                    item['link'] = feed['photo']['photoUrl']
                    item['m_id'] = self.m_id
                    item['channel'] = 'kuaishou'
                    # `timestamp` is in milliseconds.
                    create_time = feed['photo']['timestamp']
                    time_array = time.localtime(create_time / 1000)
                    item['publish_at'] = time.strftime("%Y--%m--%d %H:%M:%S ", time_array)
                    # 判断是否已存数据库 -- skip videos already stored.
                    # Parameterized query instead of str.format (SQL-injection safe).
                    self.cursor.execute(
                        "SELECT 1 FROM ufutx_anchor_videos WHERE aweme_id = %s",
                        (item['aweme_id'],))
                    if not self.cursor.fetchall():
                        yield scrapy.Request(item['link'], callback=self.parse_video, meta={'item': item})
                pcursor = obj['data']['visionProfilePhotoList']['pcursor']
                if pcursor == 'no_more':
                    break
                self.pcursor = pcursor
                yield scrapy.Request(self.start_urls[0], callback=self.parse)
            except Exception:
                self.logger.exception("爬虫失败")
                break
        # 设置商户快手爬虫状态 -- mark the merchant's crawl as finished (2).
        if self.m_id:
            r = redis.Redis(host=settings.REDIS_HOST, password=settings.REDIS_PASSWORD, port=settings.REDIS_PORT,
                            db=settings.REDIS_DB)
            r.set('kuaishou:status:' + str(self.m_id), 2)
        self.cursor.close()
        self.conn.close()

    def parse_video(self, response):
        """Save the downloaded video body to disk, then fetch its cover.

        Files land in ``../spider_files/<account>/<basename of video URL>``;
        existing files are never overwritten.
        """
        item = response.meta["item"]
        path_dir = '../spider_files/' + item['account']
        # makedirs (not mkdir) so a missing ../spider_files parent can't crash.
        os.makedirs(path_dir, exist_ok=True)
        file_name = (item['link'].split('?')[0]).rsplit('/', 1)[1]
        file_path = os.path.join(path_dir, file_name)
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.body)
        item['local_link'] = file_path
        yield scrapy.Request(item['cover'], callback=self.parse_pic, meta={'item': item})

    def parse_pic(self, response):
        """Save the cover image next to the video and yield the finished item."""
        item = response.meta["item"]
        path_dir = '../spider_files/' + item['account']
        # Defensive: the dir normally exists from parse_video, but don't assume.
        os.makedirs(path_dir, exist_ok=True)
        file_name = (item['cover'].split('?')[0]).rsplit('/', 1)[1]
        file_path = os.path.join(path_dir, file_name)
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.body)
        item['local_cover'] = file_path
        yield item
