# -*- coding: utf-8 -*-
import os
import re

import scrapy
from pip._vendor import requests
from scrapy import Request
import datetime
from scrapy.utils.project import get_project_settings

from baidusolu.items import PostItem
from baidusolu.tool import get_today_filename, strq2b, clean_data, write_html, get_svc


class Spider(scrapy.Spider):
    """Check whether posts from the cleaned export file are indexed by Baidu.

    For every post title, a Baidu search is issued.  The spider then resolves
    the redirect links of the organic results and checks whether the post's
    real URL shows up on result page 1, page 2, or not at all, emitting a
    ``PostItem`` describing the inclusion status.
    """

    name = 'baidusolu'

    allowed_domains = ['baidu.com']

    start_urls = [
        # 'https://www.baidu.com/s?id=utf-&wd=职场小白玩转信用卡新指南&q5=1'
    ]

    # Parallel lists — entry i of each list describes the same post, read
    # from one "id|title|link|type" line of the cleaned export file.
    post_titles = []

    post_links = []

    post_ids = []

    post_types = []

    # NOTE(review): this block runs at class-definition (import) time, so
    # merely importing this module calls clean_data() and reads the export
    # file.  Consider moving it into start_requests() if import-time side
    # effects ever become a problem.
    clean_data()
    filename = get_svc('EXPORT_CLEAN_DIR')
    if os.path.exists(filename):
        # Use a context manager so the file handle is closed; the original
        # open(...).readlines() leaked it.
        with open(filename, encoding='utf-8') as _export_file:
            for _raw_line in _export_file:
                _line = _raw_line.strip()
                if not _line:
                    continue  # blank line — nothing to parse
                _fields = _line.split('|')
                if len(_fields) < 4:
                    continue  # malformed line — skip instead of IndexError
                post_ids.append(_fields[0])
                post_titles.append(_fields[1])
                post_links.append(_fields[2])
                post_types.append(_fields[3])
                start_urls.append('https://www.baidu.com/s?ie=utf-8&wd=' + _fields[1])

    # Headers sent with the redirect-resolving requests; the Cookie keeps
    # Baidu from treating us as an anonymous bot.
    headers = {
        "HOST": "www.baidu.com",
        'Cookie': '2k3dzlrdUFhTHpVQmNnS2VXRkozR3J6cTFlWGItRHBDOX4zVEtPcHNOMUhJTEplSVFBQUFBJCQAAAAAAAAAAAEAAACA5OAuX-Cp0arC3MDyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEeTil5Hk4peT',
        "Referer": "https://www.baidu.com",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36',
    }

    def parse(self, response):
        """Parse result page 1 of a Baidu search for one post title.

        If the post's real link appears among the organic results, yield an
        item marked as included on page 1; otherwise request page 2 and let
        :meth:`parse_page2` decide.
        """
        items = response.xpath('//div[@class="result c-container "]/h3/a/@href')
        if not items:
            # No organic results parsed (captcha page, layout change, ...);
            # nothing we can conclude here.
            return
        real_links = self.get_baidu_real_urls(urls=items)
        _link = self.get_post_link_in_real_links(real_links=real_links)
        if _link:
            # Found on page 1.
            index = self.post_links.index(_link)
            yield self.get_item(index, is_included=1, included_page_num=1, label="第1页")
        else:
            # Not on page 1 — recover the search keyword from the page
            # title ("<kw>_百度搜索": drop the 5-char suffix) and try page 2.
            title = response.xpath('//title/text()').extract_first()
            if title:
                bd_search_kw = title[:-5]
                url = 'https://www.baidu.com/s?ie=utf-8&pn=10&wd=' + bd_search_kw
                yield Request(url, self.parse_page2)

    def parse_page2(self, response):
        """Parse result page 2; classify the post as "page 2" or "after page 6"."""
        items = response.xpath('//div[@class="result c-container "]/h3/a/@href')
        if not items:
            return

        real_links = self.get_baidu_real_urls(urls=items)
        _link = self.get_post_link_in_real_links(real_links=real_links)

        if _link:
            # Found on page 2.
            index = self.post_links.index(_link)
            yield self.get_item(index, is_included=1, included_page_num=2, label="第2页")
        else:
            # Not on page 2 either — recover the keyword from the page title
            # and, if it is one of ours, record it as "beyond page 6".
            title = response.xpath('//title/text()').extract_first()
            if title:
                bd_search_kw = title[:-5]
                if bd_search_kw in self.post_titles:
                    index = self.post_titles.index(bd_search_kw)
                    yield self.get_item(index, is_included=1, included_page_num=6, label="6页之后")

    def get_baidu_real_urls(self, urls):
        """Resolve Baidu redirect links to the real article URLs.

        Baidu result links are redirectors; requesting one without following
        the redirect exposes the target URL in the ``Location`` header.
        Non-redirect responses (no ``Location``) are skipped instead of
        raising ``KeyError`` as the original code did.
        """
        real_links = []
        for selector in urls:
            bd_url = selector.extract()
            http = requests.get(bd_url, headers=self.headers, allow_redirects=False)
            location = http.headers.get('Location')
            if location:
                real_links.append(location)
        return real_links

    def get_post_link_in_real_links(self, real_links):
        """Return the first resolved link that belongs to one of our posts, or ``None``."""
        for link in real_links:
            if link in self.post_links:
                return link
        return None

    def get_item(self, index, is_included, included_page_num, label):
        """Build a ``PostItem`` for the post at *index* in the parallel lists.

        :param index: position in post_ids/post_types identifying the post
        :param is_included: 1 when found in Baidu's results
        :param included_page_num: result page number (6 means "after page 6")
        :param label: human-readable page label
        """
        item = PostItem()
        item['type'] = int(self.post_types[index])
        item['post_id'] = int(self.post_ids[index])
        item['is_included'] = is_included
        item['included_page_num'] = included_page_num
        item['label'] = label
        item['status'] = 1
        return item
