import scrapy
import os
import re
import copy
import pymysql
from BAIDUZHIDAO.items import BaiduzhidaoItem


class ZhidaoSpider(scrapy.Spider):
    """Crawl Baidu Zhidao (zhidao.baidu.com) search results for keywords.

    Keywords are read from a MySQL ``keyword`` table (rows where 已采=0).
    Each keyword is submitted to the Zhidao search page; every question URL
    found there is scraped for its title and answer blocks, and the
    "related questions" links on each question page are followed as well.
    """

    name = 'zhidao'
    allowed_domains = ['baidu.com', 'zhidao.baidu.com']

    def __init__(self, name=None, **kwargs):
        # BUG FIX: the original called ``super(scrapy.Spider, self).__init__()``,
        # which invokes the *parent of* scrapy.Spider (i.e. skips
        # Spider.__init__ entirely) and drops ``name``/``kwargs``.
        super().__init__(name=name, **kwargs)
        # NOTE(review): credentials are hard-coded in source; consider moving
        # them into Scrapy settings or environment variables.
        self.db = pymysql.connect(
            host='115.159.211.233',
            user='baiduzhidao',
            password='zhidao',
            database='baiduzhidao',
            charset='utf8',
        )
        self.cursor = self.db.cursor()

    def closed(self, reason):
        """Scrapy shutdown hook: release the DB cursor and connection."""
        try:
            self.cursor.close()
        finally:
            self.db.close()

    def start_requests(self):
        """Yield one search request per not-yet-crawled keyword row."""
        sql = '''select * from keyword WHERE 已采=0'''  # LIMIT 1
        self.cursor.execute(sql)
        for row in self.cursor.fetchall():
            # Row layout per original code: (keyword, yifa, id).
            # ``row_id`` avoids shadowing the builtin ``id``.
            keyword, yifa, row_id = row[0], row[1], row[2]
            yield scrapy.Request(
                url='http://zhidao.baidu.com/search?word=' + keyword,
                callback=self.parse_one_url,
                meta={'keyword': keyword, 'id': row_id, 'yifa': yifa},
            )

    def parse_one_url(self, response):
        """Parse a search-result page and request every question URL on it."""
        print(response.xpath('//title/text()').get())
        # BUG FIX: the original used ``.get()``, which returns only the FIRST
        # attribute of the first non-ad result link, so at most one question
        # URL was ever extracted per search page. Collect every attribute
        # value of every result anchor and scan each of them.
        attr_values = response.xpath(
            '''//div[@id="wgt-list"]//dl[contains(@class,"dl") and not(contains(@class,"ad"))]//dt/a/@*''').getall()
        url_list = []
        for value in attr_values:
            # Raw string + non-greedy + escaped dot: the original pattern
            # 'http://.*html?' matched greedily and could swallow trailing text.
            url_list.extend(re.findall(r'http://.*?\.html?', value))
        keyword = response.meta['keyword']
        for url in url_list:
            item = BaiduzhidaoItem()
            item['keyword'] = keyword
            item['url'] = url
            print(item['url'])
            yield scrapy.Request(url=item['url'], callback=self.parse_two_url, meta={'item': item})

    def parse_two_url(self, response):
        """Scrape a question page: title plus all answer blocks joined by #####."""
        item = response.meta['item']
        # BUG FIX: ``.get()`` may return None (e.g. a page without <title>),
        # which crashed on ``.replace``; fall back to an empty string.
        raw_title = response.xpath('//title/text()').get()
        item['title'] = (raw_title or '').replace('_百度知道', '')
        answers = [sel.get() for sel in response.xpath('//div[contains(@id,"content")]')]
        item['content'] = '#####'.join(answers)
        # NOTE(review): the populated item is never yielded here, so it never
        # reaches a pipeline — it is only forwarded via meta. Confirm whether
        # a ``yield item`` is missing by design.
        count = []
        for li in response.xpath('//ul[contains(@class,"related-ul")]//li'):
            href = li.xpath('.//a/@href').get()
            if not href:
                # Skip <li> entries without a link instead of crashing on None.
                continue
            url = 'https://zhidao.baidu.com' + href.split('?')[0]
            yield scrapy.Request(url=url, callback=self.parse_three_url,
                                 meta={'item': item, 'e': count},
                                 dont_filter=True)

    def parse_three_url(self, response):
        """Placeholder for related-question pages — not yet implemented."""
        # TODO: extract and store the related-question content.
        pass
