# -*- coding: utf-8 -*-
import scrapy
from caiji import settings


class Xiaolai(scrapy.Spider):
    """Query Baidu mobile search for each keyword in ``keywords.txt``
    and append the related-search suggestions to ``结果01.txt``.
    """
    name = 'xiaolai'
    allowed_domains = []
    # Kept for Scrapy's contract; the actual URL list is rebuilt per
    # instance in __init__ to avoid mutating this shared class attribute.
    start_urls = []

    def __init__(self, *args, **kwargs):
        # The original skipped the base-class initializer, which Scrapy's
        # Spider requires for correct setup.  *args/**kwargs keep the
        # signature backward-compatible with the old zero-arg form.
        super().__init__(*args, **kwargs)
        # Rebind as an instance attribute: the original appended to the
        # class-level list, so every new instance kept growing it.
        self.start_urls = []
        # `with` guarantees the handle is closed; strip() drops the trailing
        # newline the original concatenated straight into the URL.
        with open("keywords.txt", "r", encoding="utf-8") as fh:
            for line in fh:
                keyword = line.strip()
                if keyword:
                    self.start_urls.append("http://m.baidu.com/s?wd=" + keyword)

    def start_requests(self):
        """Yield one request per start URL with fixed mobile-Baidu headers."""
        # The headers are identical for every URL, so build the dict once
        # instead of once per iteration.
        headers = {
            'User-Agent': settings.USER_AGENTS,
            "Host": "m.baidu.com",
            # NOTE(review): hard-coded session cookie — it will expire;
            # consider moving it to settings.  Split with implicit string
            # concatenation: the original literal contained a raw newline,
            # which is a syntax error in Python.
            "Cookie": "BAIDUID=B019883A489557949687FCD6626D7F19:FG=1; BIDUPSID=B019883A489557949687FCD6626D7F19; PSTM=1592730823; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; COOKIE_SESSION=0_0_0_0_0_0_0_0_0_0_0_0_0_1592900031%7C1%230_0_0_0_0_0_0_0_1592900031%7C1; MSA_WH=414_736; MSA_PBT=146; MSA_PHY_WH=1242_2208; MSA_ZOOM=1056; BDSFRCVID=lJ4OJeC62ZfhS_nr0aolvGIsRV8xccRTH6aola629RY_EmHc7WQ_EG0PeU8g0KA-N470ogKK3gOTH4PF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=tb4fVCI2JI83JtIkbn0hDDCShUFsb-ALB2Q-5KL-bx50Hpjk5J0VLpkPLqOpb-Lt5gb8afbdJJjo8pKx0lPa3fuU3H7kKUReJeTxoUJR5DnJhhvG-6rd0JLebPRit4r9Qg-qahQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0M5DK0hIt9j5t2j53MKfRQeJJO25nqXTr2a-t3sMOsXU6qLT5X2-jT35bZb64L2loY04b4qUQFW6J8hp0njxQyb5QMKeJk2MbSWDnjettzMxonDh89XH7MJUntKJ5WatcO5hvvMJ6O3MA-yUKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQbG_EJjLtJbkeoDKQbbj_fb7kbnQE-tChMMoXKK62aKDsQKJoBhcqEIL4K4jbyTDX5q3z0f6A06npQb5Ctq3EMxbSj4D5jx3D3n7PBj3O3g6GQJ5FWq5nhMJv3j7JDMP0-GQjJ-Qy523i_R6vQpP-OpQ3DRoWXPIqbN7P-p5Z5mAqKl0MLPbtbb0xb6_0DTObDG0qqTksMjQeo6r5KJ3BJKD9q4bohjPNyH79BtQmJJufVI3ltJoKhbjvXnjnyJtwyM7OJM72Qg-q3R7h5bOZ8D332RuhyfKILxj00x-jLN7uVn0MW-5DbtJ25PnJyUnybPnnBT3T3H8HL4nv2JcJbM5m3x6qLTKkQN3T-PKO5bRh_CFhtI0aMKIwejRb5nbHKh5yKRvja-o23KvsWDbcOR5Jj65Ab4k9BUTHJPoyaTcxa4O4QDnWHnO-3MA--t4j-HDHqljkX2TLan85QMjrsq0x0-ole-bQyPLLWj0ftIOMahkb5h7xOKbMQlPK5JkgMx6MqpQJQeQ-5KQN3KJmfbL9bT3YjjISKx-_t68jfRvP; delPer=0; H_PS_PSSID=32096_1447_31326_32141_32139_32046_32230_32144_31640; __cas__id__=0; __cas__st__=NLI; H_WISE_SIDS=148077_148178_149981_150073_147087_150087_148194_148867_150794_148713_150744_147280_150165_149586_149540_150154_148754_147889_148524_151033_127969_149571_149907_146550_150563_149718_146652_150346_146732_149557_145987_131423_100805_147528_150000_107313_148186_147717_149251_150909_140311_144966_149279_149771_148750_147546_148868_150377_110085; rsv_i=5e16A4i7yuf0%2FITK06hIw%2FsLMnk258njmLm4UkegbvADnFmYLXCVFqOxwlcXl8D6I400O7h%2FFoP8oK12qQApR2DKH9K2SO8; FEED_SIDS=55_2; plus_lsv=e1339ee5f098ff6b; "
                      "plus_cv=1::m:49a3f4a6; Hm_lvt_12423ecbc0e2ca965d84259063d35238=1592899840,1593857767; Hm_lpvt_12423ecbc0e2ca965d84259063d35238=1593857767; SE_LAUNCH=5%3A26564296; BDSVRBFE=Go; BDICON=10123156; PSINO=5; ysm=8431; BDORZ=SFH",
        }
        for url in self.start_urls:
            yield scrapy.Request(url, headers=headers)

    def parse(self, response):
        """Extract related-search terms from the page and append each
        non-empty term to ``结果01.txt``, one per line.
        """
        print(response.request.headers['User-Agent'])
        terms = response.xpath(
            '//div[@id="page-relative"]/div[@class="bc relate"]/div[@class="rw-list"]/a/text()').extract()
        # Open once per response: the original re-opened the output file on
        # every loop iteration and never closed it, leaking file handles.
        with open("结果01.txt", "a", encoding="utf-8") as out:
            for term in terms:
                print(term)
                if term != "":
                    out.write(term + "\n")
