import json
import time
import os.path
import requests
import scrapy
import redis
from douyin_pro.items import DouyinProItem
from douyin_pro import settings
import pymysql

def get_start_urls():
    """Pop one merchant record off the Redis queue and build the start URL list.

    The queue entry is a JSON string like ``{"m_id": "...", "value": "<home url>"}``.
    Returns a list with at most one Douyin aweme-post API URL (the merchant id is
    appended as ``m_id=`` so downstream callbacks can recover it); returns an
    empty list when the queue is empty. On a parse/Redis failure the popped
    record is pushed back so it is not lost.
    """
    r = redis.Redis(host=settings.REDIS_HOST, password=settings.REDIS_PASSWORD,
                    port=settings.REDIS_PORT, db=settings.REDIS_DB)
    url_arr = []
    rs_key = 'merchant:douyin'
    # Bind before the try: the original bare except referenced `result` even
    # when rpop itself had raised, which turned any Redis error into a NameError.
    result = None
    try:
        result = r.rpop(rs_key)
        print('获取redis信息--douyin')
        print(result)
        if not result:
            return url_arr
        obj = json.loads(result)
        m_id = obj['m_id']
        home_url = obj['value']
        # sec_uid is the last path segment of the merchant's home-page URL
        key = (home_url.split('?')[0]).rsplit('/', 1)[1]
        url = ("https://www.iesdouyin.com/web/api/v2/aweme/post/?sec_uid=" + key
               + "&count=21&max_cursor=0&aid=1128"
               + "&_signature=1h7TsAAAtyoxaSu2AWCnetYe06&dytk=&m_id=" + str(m_id))
        url_arr.append(url)
        # Mark this merchant's douyin crawl as "in progress" (status 1).
        m_status_key = 'douyin:status:' + str(m_id)
        r.set(m_status_key, 1)
    except (ValueError, KeyError, IndexError, TypeError, redis.RedisError) as e:
        # Narrowed from a bare except; re-queue the record only if one was popped.
        print(e)
        if result is not None:
            r.lpush(rs_key, result)
    return url_arr


class DouyinSpider(scrapy.Spider):
    """Crawl one merchant's Douyin video feed.

    Flow: ``parse`` reads an aweme-list API page, dedupes against MySQL,
    requests each new video (``parse_video`` saves it to disk), then the
    cover image (``parse_pic`` saves it and yields the finished item), and
    paginates via ``max_cursor``.
    """
    name = 'douyin'
    allowed_domains = []

    # Evaluated at import time: pops one merchant record from Redis.
    start_urls = get_start_urls()

    def parse(self, response):
        """Parse an aweme-list API response.

        Yields video-download requests for awemes not yet in MySQL, a
        follow-up page request when the API reports a new max_cursor, and
        finally flips the merchant's Redis status to 2 (page processed).
        """
        print(response)
        obj = json.loads(response.text)
        aweme_list = obj['aweme_list']  # renamed: `list` shadowed the builtin
        for aweme in aweme_list:
            try:
                if aweme['video']:
                    item = DouyinProItem()
                    item['title'] = aweme['desc']
                    item['aweme_id'] = aweme['aweme_id']
                    item['account'] = aweme['author']['nickname']
                    item['cover'] = aweme['video']['origin_cover']['url_list'][0]
                    item['link'] = aweme['video']['play_addr']['url_list'][0]
                    item['m_id'] = response.url.split('m_id=')[1]
                    item['channel'] = 'douyin'
                    item['publish_at'] = None
                    # BUG FIX: hasattr() on a dict tests attributes, not keys,
                    # so it was always False and publish_at stayed None.
                    if 'create_time' in aweme:
                        time_array = time.localtime(aweme['create_time'])
                        item['publish_at'] = time.strftime("%Y--%m--%d %H:%M:%S", time_array)
                    conn = pymysql.connect(host=settings.DB_HOST, database=settings.DB_DATABASE,
                                           user=settings.DB_USER, password=settings.DB_PASSWORD)
                    try:
                        cursor = conn.cursor()
                        # Parameterized query — the original interpolated
                        # aweme_id with str.format (SQL-injection prone).
                        cursor.execute(
                            "SELECT * FROM ufutx_anchor_videos WHERE aweme_id = %s",
                            (item['aweme_id'],))
                        results = cursor.fetchall()
                        cursor.close()
                    finally:
                        # Always release the connection, even when the query fails;
                        # the original leaked it on any exception before close().
                        conn.close()
                    if len(results) == 0:
                        # Unseen aweme: download the video file next.
                        yield scrapy.Request(item['link'], callback=self.parse_video,
                                             meta={'item': item})
            except BaseException as e:
                print("爬虫查询错误")
                print(e)
        # Pagination: rewrite the max_cursor query parameter in place and
        # re-request until the API returns the cursor we already have.
        max_cursor = str(obj['max_cursor'])
        next_url = response.url
        for res in next_url.split('&'):
            if 'max_cursor' in res:
                current_cursor = res.split('=')[1]
                if max_cursor != current_cursor:
                    next_url = next_url.replace(res, 'max_cursor=' + max_cursor)
                    yield scrapy.Request(next_url, callback=self.parse)
        if response.url.split('m_id=')[1]:
            # Mark the merchant's crawl status as 2 (this page handled).
            r = redis.Redis(host=settings.REDIS_HOST, password=settings.REDIS_PASSWORD,
                            port=settings.REDIS_PORT, db=settings.REDIS_DB)
            m_status_key = 'douyin:status:' + str(response.url.split('m_id=')[1])
            r.set(m_status_key, 2)

    def parse_video(self, response):
        """Write the downloaded video to disk, then request the cover image."""
        item = response.meta["item"]
        path_dir = '../spider_files/' + item['account']
        # makedirs(exist_ok=True): creates missing parents and avoids the
        # check-then-mkdir race of the original isdir/mkdir pair.
        os.makedirs(path_dir, exist_ok=True)
        path = (item['link'].split('?')[0]).rsplit('/', 2)[1]
        file = '{}.mp4'.format(os.path.join(path_dir, path))
        # BUG FIX: the original tested the extension-less path, which never
        # exists, so the .mp4 was rewritten on every crawl. Test the real file.
        if not os.path.exists(file):
            with open(file, 'wb') as f:
                f.write(response.body)
        item['local_link'] = file
        yield scrapy.Request(item['cover'], callback=self.parse_pic, meta={'item': item})

    def parse_pic(self, response):
        """Write the cover image to disk and emit the completed item."""
        item = response.meta["item"]
        path = (item['cover'].split('?')[0]).rsplit('/', 1)[1]
        path_dir = '../spider_files/' + item['account']
        file_path = os.path.join(path_dir, path)
        if not os.path.exists(file_path):
            with open(file_path, 'wb') as f:
                f.write(response.body)
        item['local_cover'] = file_path
        yield item