import json
import os
import time

import scrapy
from douyin.items import DouyinVideoItem
from scrapy.utils.conf import closest_scrapy_cfg
# Path to the nearest scrapy.cfg; '' if none is found.
proj_root = closest_scrapy_cfg()
# Assets directory lives next to scrapy.cfg in the project root.
# Bug fix: the original passed the literal string 'proj_root' to
# os.path.dirname, so the path resolved to the current working directory
# instead of the project root. Also use os.path.join instead of a
# hard-coded '\\' so the path is portable (identical result on Windows).
assets_path = os.path.join(os.path.dirname(os.path.abspath(proj_root)), 'assets')
class Douyin1Spider(scrapy.Spider):
    """Spider that repeatedly pulls pages from the Douyin web feed API.

    Each request fetches one "page" of the recommendation feed
    (``/aweme/v1/web/tab/feed/``); the JSON response is wrapped in a
    DouyinVideoItem and the next request is scheduled with an
    incremented ``refresh_index``, forming an endless crawl loop.
    """

    name = 'douyin1'
    allowed_domains = ['www.douyin.com']
    start_urls = ['https://www.douyin.com/']
    custom_settings = {
        # NOTE(review): hard-coded absolute path — consider deriving it from
        # the module-level assets_path so the project is relocatable.
        'LOG_FILE':'G:/python/scrapy爬取抖音短视频/douyin/assets/douyin1.log',
    }

    def __init__(self, **kwargs):
        # Truncate last run's log file before Scrapy attaches its handler.
        # Context manager guarantees the handle is closed even on error
        # (the original left a bare open()/close() pair).
        with open(self.custom_settings['LOG_FILE'], 'w'):
            pass
        super().__init__(**kwargs)
        self.request_url = 'https://www.douyin.com/aweme/v1/web/tab/feed/'
        self.maxAge = 1000        # page cap — not referenced in this file; presumably used elsewhere
        self.count = 0            # number of requests issued so far
        self.refresh_index = 0    # feed page counter echoed to the API
        # Query parameters imitating a desktop Chrome client. The last four
        # values are placeholders that must be filled with real signatures
        # (e.g. by a downloader middleware) before the API accepts the call.
        self.request_param = {
            "device_platform": "webapp",
            "aid": "6383",
            "channel": "channel_pc_web",
            "tag_id": "",
            "share_aweme_id": "",
            "count": "10",
            "refresh_index": "",
            "video_type_select": "0",
            "version_code": "170400",
            "version_name": "17.4.0",
            "cookie_enabled": "true",
            "screen_width": "2560",
            "screen_height": "1440",
            "browser_language": "zh-CN",
            "browser_platform": "Win32",
            "browser_name": "Chrome",
            "browser_version": "98.0.4758.102",
            "browser_online": "true",
            "engine_name": "Blink",
            "engine_version": "98.0.4758.102",
            "os_name": "Windows",
            "os_version": "10",
            "cpu_core_num": "16",
            "device_memory": "8",
            "platform": "PC",
            "downlink": "10",
            "effective_type": "4g",
            "round_trip_time": "50",
            "webid": "webidValue",
            "msToken": "msTokenValue",
            "X-Bogus": "X-BogusValue",
            "_signature": "_signatureValue"
        }

    def _next_request(self, is_first):
        """Advance the page counters and build the next feed request.

        Deduplicates the identical build logic that start_requests() and
        parse() previously carried verbatim.
        """
        self.refresh_index += 1
        self.count += 1
        self.logger.warning('爬取第{}组数据:---------------------------'.format(self.refresh_index))
        self.request_param['refresh_index'] = str(self.refresh_index)
        # join() replaces the original manual '+'-concatenation plus
        # trailing-'&' slice; the resulting URL is byte-identical.
        query = '&'.join('{}={}'.format(k, v) for k, v in self.request_param.items())
        return scrapy.Request(
            self.request_url + '?' + query,
            # Custom headers read by project middleware (Scrapy serializes
            # int/bool values to the same bytes the original produced).
            headers={'refresh_index': self.refresh_index, 'isFirst': is_first},
            dont_filter=True,   # same URL is re-requested every cycle
            callback=self.parse
        )

    def start_requests(self):
        """Kick off the crawl with the first feed page."""
        self.logger.warning('数据开始爬取数据:---------------------------')
        yield self._next_request(is_first=True)

    def parse(self, response, **kwargs):
        """Parse one feed page into an item, then schedule the next page."""
        # Bug fix: response.body is always bytes (never None), so the old
        # ``is not None`` guard was always true and an empty body crashed
        # json.loads. Truthiness skips empty responses safely.
        if response.body:
            item = DouyinVideoItem()
            item['aweme_list'] = json.loads(response.body.decode())
            # has_more == 1 means the server still has pages; anything else
            # (0, missing, None) means the feed is exhausted.
            item['has_more'] = item['aweme_list'].get('has_more') == 1
            yield item

        # NOTE(review): time.sleep blocks Scrapy's event loop; the
        # DOWNLOAD_DELAY setting is the non-blocking way to pace requests.
        # Kept as-is to preserve the existing 10 s cadence.
        time.sleep(10)
        yield self._next_request(is_first=False)

    def __del__(self):
        # NOTE(review): __del__ is not guaranteed to run at shutdown; the
        # spider_closed signal is the reliable hook for this log line.
        self.logger.warning('数据结束爬取:---------------------------')
