from scrapy import Request, Spider, FormRequest
import time
import datetime
from urllib.parse import urlencode
import requests


def getBetweenDay(begin_date, end_date):
    """Return every calendar date from begin_date through end_date (inclusive)
    as a list of 'YYYY-MM-DD' strings; empty list if end_date precedes begin_date.
    """
    start = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    stop = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    day_count = (stop - start).days
    return [
        (start + datetime.timedelta(days=offset)).strftime("%Y-%m-%d")
        for offset in range(day_count + 1)
    ]


def get_real_url(url, try_count=1):
    """Follow redirects and return the final URL that *url* resolves to.

    Makes up to 3 total attempts (starting from *try_count*); on network
    failure or an HTTP error status the attempt is retried, and the original
    *url* is returned unchanged if every attempt fails.

    Fixes vs. previous version:
    - `status_code > 400` excluded HTTP 400 itself from the error path;
      now any status >= 400 counts as a failure.
    - bare `except:` (which also swallowed KeyboardInterrupt/SystemExit)
      narrowed to `requests.RequestException`.
    - recursion replaced with an equivalent bounded loop.
    """
    http_headers = {
        'Accept': '*/*',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'
    }
    # Honor a caller-supplied try_count offset: attempts run while try_count <= 3.
    for _ in range(try_count, 4):
        try:
            rs = requests.get(url, headers=http_headers, timeout=10)
        except requests.RequestException:
            continue  # network error -> retry
        if rs.status_code >= 400:
            continue  # HTTP error -> retry
        return rs.url
    return url


def optimize_str(text):
    """Strip surrounding whitespace and delete every CR/LF character from *text*.

    Fixes vs. previous version: the parameter no longer shadows the builtin
    ``str``, and the redundant trailing ``.strip()`` was dropped — after the
    leading strip the string's end characters are non-whitespace, and removing
    interior '\\n'/'\\r' cannot expose new end whitespace.
    """
    return text.strip().replace('\n', '').replace('\r', '')


class BaiduNewsSearchSpider(Spider):
    """Search Baidu for a fixed keyword one day at a time.

    For each day in the hard-coded date range, issues a search request whose
    ``gpc`` parameter restricts results to that day's Unix-timestamp window,
    follows "next page" pagination, and yields a dict per search hit with the
    hit's title and its resolved (post-redirect) URL.

    Fix vs. previous version: ``parse_srcsys`` misspelled its first parameter
    as ``slef`` instead of ``self`` (worked only because it is always passed
    positionally).
    """

    name = "baiduhome_pg"

    def start_requests(self):
        """Yield one time-filtered Baidu search request per day in the range."""
        date_list = getBetweenDay('2019-04-30', '2019-05-04')
        keyword = "Jeep + 指南者 + 1.3t"
        for item in date_list:
            # Build the day's [00:00:00, 23:59:59] window as Unix timestamps;
            # Baidu's "gpc=stf=start,end|stftype=2" parameter filters by time.
            startTime = item + ' 00:00:00'
            endTime = item + ' 23:59:59'
            startTimestamp = int(time.mktime(time.strptime(startTime, "%Y-%m-%d %H:%M:%S")))
            endTimestamp = int(time.mktime(time.strptime(endTime, "%Y-%m-%d %H:%M:%S")))
            gpc = "stf={},{}|stftype=2".format(str(startTimestamp), str(endTimestamp))
            params = {
                "ie": "utf-8",
                "f": "8",
                "rsv_bp": "1",
                "rsv_idx": "2",
                "tn": "baiduhome_pg",
                "wd": keyword,
                "rsv_spt": "1",
                "oq": keyword,
                # rsv_pq / rsv_t look like session tokens captured from a live
                # browser request — presumably Baidu tolerates stale values;
                # TODO(review): confirm they are not required to be fresh.
                "rsv_pq": "ac26c235000e7317",
                "rsv_t": "b286eWiik+IJtEOKbgZiHnmV3LJlFmVW3nztNMm8XL+wtZxXJfV50vEk50m0SGDM1KOB",
                "rqlang": "cn",
                "rsv_enter": "1",
                "gpc": gpc,
                "tfflag": "1",
                "bs": keyword,
            }
            result = urlencode(params)
            yield Request(
                url="https://www.baidu.com/s?" + result,
                callback=self.parse,
                meta={
                    'keyword': keyword,
                    'postdate': item
                }
            )

    def parse(self, response):
        """Yield one result dict per search hit; follow the next-page link."""
        keyword = response.meta['keyword']
        postdate = response.meta['postdate']
        # Current page number as shown in Baidu's pager ('' if absent).
        page = ''.join(response.xpath('//*[@id="page"]/strong/span[@class="pc"]/text()').extract())
        print(keyword, postdate, page)
        items = response.xpath('//*[@id="content_left"]/div')
        for item in items:
            data = {
                'keyword': keyword,
                'postdate': postdate,
                'page': page,
                'title': optimize_str(''.join(item.xpath('.//h3//text()').extract())),
                # Baidu returns redirect links; resolve each to its real URL.
                'url': get_real_url(item.xpath('.//h3/a/@href').extract()[0]),
            }
            yield data
            # NOTE(review): disabled ICP-lookup follow-up request; if revived,
            # it would enrich `data` via parse_srcsys below.
            # s = data['url'].split('//')[-1]
            # form_data = {"type": "host", "s": s, "code": "", "havecode": "0"}
            # yield FormRequest(
            #     url="http://icp.chinaz.com/{}".format(s),
            #     formdata=form_data,
            #     callback=self.parse_srcsys,
            #     meta={'data': data}
            # )
        # "下一页" = "next page"; empty string means last page reached.
        next_page = ''.join(response.xpath('.//a[contains(text(),"下一页")]/@href').extract())
        if len(next_page):
            yield Request(
                url="https://www.baidu.com" + next_page,
                callback=self.parse,
                meta={
                    'keyword': keyword,
                    'postdate': postdate
                }
            )

    def parse_srcsys(self, response):
        """Attach the site name scraped from the ICP-lookup page to the item."""
        data = response.meta['data']
        data['srcsys'] = ''.join(response.xpath('//span[text()="网站名称"]/following-sibling::p/text()').extract())
        yield data
