import json
import os
import time

import scrapy

from douyin.items import DouyinVideoItem
from scrapy.utils.conf import closest_scrapy_cfg
# Locate the project's scrapy.cfg and derive the assets directory that sits
# next to it. NOTE: the original passed the string literal 'proj_root' to
# os.path.dirname, which always yielded '' and made assets_path fall back to
# the current working directory; we must dereference the variable instead.
proj_root = closest_scrapy_cfg()
# os.path.join keeps the path portable (the original hard-coded '\\assets',
# which only works on Windows).
assets_path = os.path.join(os.path.dirname(os.path.abspath(proj_root)), 'assets')

class Douyin1WithkeywordSpider(scrapy.Spider):
    """Keyword search spider for the Douyin web search API.

    Reads its search parameters (keyword, paging offset, filters) from a
    JSON settings file, pages through the search results endpoint, yields
    one ``DouyinVideoItem`` per response, and persists the paging offset
    back to the settings file so an interrupted crawl can resume.
    """
    name = 'douyin1_withkeyword'
    allowed_domains = ['www.douyin.com']
    start_urls = ['http://www.douyin.com/']
    custom_settings = {
        'LOG_FILE': 'G:/python/scrapy爬取抖音短视频/douyin/assets/douyin1_withkeyword.log',
    }
    # JSON file holding both the search parameters and the persisted crawl
    # state (single source of truth; the original repeated this literal
    # three times).
    SETTINGS_FILE = 'G:/python/scrapy爬取抖音短视频/douyin/assets/douyin1_withkeyword.json'

    def __init__(self, **kwargs):
        # Truncate the previous run's log file (Scrapy appends to LOG_FILE).
        open(self.custom_settings['LOG_FILE'], 'w').close()
        super().__init__(**kwargs)
        # Load search parameters / crawl state, closing the file promptly.
        # (The original kept the handle open for the spider's lifetime and
        # relied on __del__ to close it — a leak if __del__ never ran.)
        with open(self.SETTINGS_FILE, 'r', encoding='utf-8') as settings_file:
            self.allSettings = json.load(settings_file)
        self.searchParams = self.allSettings['searchParams']
        self.request_url = 'https://www.douyin.com/aweme/v1/web/general/search/single/'
        self.maxAge = 1000
        self.num = 0  # number of result pages requested so far
        self.count = self.searchParams['count']
        self.keyword = self.searchParams['keyword']
        self.offset = self.searchParams['offset']
        self.publish_time = self.searchParams['publish_time']
        self.isOPenPush_time = self.searchParams['isOPenPush_time']
        self.is_filter_search = self.searchParams['is_filter_search']
        self.query_correct_type = self.searchParams['query_correct_type']
        self.sort_type = self.searchParams['sort_type']
        self.sign = True  # True until the one-time offset reset is persisted
        self.request_param = {
            "device_platform": "webapp",
            "aid": "6383",
            "channel": "channel_pc_web",
            # ----- keyword-search parameters -----
            "search_channel": "aweme_general",
            "sort_type": self.sort_type,
            "publish_time": self.publish_time,
            "keyword": self.keyword,
            "search_source": "normal_search",
            "query_correct_type": self.query_correct_type,
            "is_filter_search": self.is_filter_search,
            "offset": self.offset,
            "count": self.count,
            # ----- keyword-search parameters -----
            "version_code": "170400",
            "version_name": "17.4.0",
            "cookie_enabled": "true",
            "screen_width": "2560",
            "screen_height": "1440",
            "browser_language": "zh-CN",
            "browser_platform": "Win32",
            "browser_name": "Chrome",
            "browser_version": "98.0.4758.102",
            "browser_online": "true",
            "engine_name": "Blink",
            "engine_version": "98.0.4758.102",
            "os_name": "Windows",
            "os_version": "10",
            "cpu_core_num": "16",
            "device_memory": "8",
            "platform": "PC",
            "downlink": "10",
            "effective_type": "4g",
            "round_trip_time": "50",
            "webid": "webidValue",
            "msToken": "msTokenValue",
            "X-Bogus": "X-BogusValue",
            "_signature": "_signatureValue"
        }

    def _build_next_request(self, is_first):
        """Advance the paging offset, refresh the query parameters and
        return the Request for the next search-results page.

        Shared by start_requests and parse (the original duplicated this
        logic verbatim in both). ``is_first`` is forwarded in the 'isFirst'
        header so downstream middleware can distinguish the first request.
        """
        self.offset = self.offset + self.count
        self.request_param['keyword'] = self.keyword
        self.request_param['offset'] = str(self.offset)
        self.request_param['count'] = str(self.count)
        self.request_param['is_filter_search'] = self.is_filter_search
        self.request_param['query_correct_type'] = self.query_correct_type
        self.request_param['sort_type'] = self.sort_type
        self.request_param['publish_time'] = self.publish_time
        self.num = self.num + 1
        self.logger.warning('爬取第{}组数据:---------------------------'.format(self.num))
        if not self.isOPenPush_time:
            # publish_time filter disabled: drop it from the query string
            # (it is re-added above on the next call).
            del self.request_param['publish_time']
        # Build the query string by plain concatenation, matching the
        # original byte-for-byte (urlencode would percent-escape the
        # keyword and change the URL the site receives).
        query = '&'.join(key + '=' + value for key, value in self.request_param.items())
        return scrapy.Request(
            self.request_url + '?' + query,
            headers={
                'isFirst': is_first,
                'keyword': self.keyword,
                'offset': self.offset,
                'count': self.count,
                'is_filter_search': self.is_filter_search,
                'query_correct_type': self.query_correct_type,
                'sort_type': self.sort_type
            },
            dont_filter=True,
            callback=self.parse
        )

    def start_requests(self):
        """Kick off the crawl with the first search-results request."""
        self.logger.warning('数据开始爬取数据:---------------------------')
        yield self._build_next_request(True)

    def parse(self, response, **kwargs):
        """Parse one search-results page, persist the paging state, yield
        the item, then schedule the next page."""
        item = DouyinVideoItem()
        # searchParams aliases allSettings['searchParams'], so this also
        # updates self.searchParams (as in the original).
        self.allSettings['searchParams']['offset'] = self.offset
        if self.searchParams['isIncrementOffset']:
            # Persist the current offset so the next run resumes here.
            with open(self.SETTINGS_FILE, 'w') as file:
                json.dump(self.allSettings, file)
        elif self.sign:
            # Resume disabled: write offset 0 back exactly once.
            self.sign = False
            self.allSettings['searchParams']['offset'] = 0
            with open(self.SETTINGS_FILE, 'w') as file:
                json.dump(self.allSettings, file)

        if response.body is not None:
            item['aweme_list'] = json.loads(response.body.decode())
            # has_more == 1 means more pages exist (None/anything else -> False;
            # equivalent to the original's two-step check).
            item['has_more'] = item['aweme_list'].get('has_more') == 1
            yield item
        # NOTE(review): time.sleep blocks Scrapy's reactor; DOWNLOAD_DELAY
        # in custom_settings would be the idiomatic throttle. Kept to
        # preserve the original pacing.
        time.sleep(10)
        print(self.offset)
        yield self._build_next_request(False)
