from urllib.parse import quote

import scrapy


class DomainSpider(scrapy.Spider):
    """Search Baidu (PC) for a keyword and print each result's footer link text.

    Search-engine entry points kept for reference:
      Baidu PC:     http://www.baidu.com/s?wd=<kw>&pn=<page>&rn=50
      Baidu mobile: http://m.baidu.com/s?wd=<kw>&pn=<page>
      360 PC:       https://www.so.com/
      360 mobile:   https://m.so.com/
      Sogou PC:     https://www.sogou.com/
      Sogou mobile: https://m.sogou.com/
    """

    name = 'domain'
    # allowed_domains = ['www.xxx.com']

    # Search keyword; the previous hard-coded URL carried this value
    # percent-encoded ('%E5%B0%8F%E9%BE%99%E8%99%BE%E9%9B%B6%E5%94%AE').
    # Override from the command line: scrapy crawl domain -a keyword=...
    keyword = '小龙虾零售'

    def start_requests(self):
        """Yield the initial Baidu search request for ``self.keyword``."""
        # quote() percent-encodes the (non-ASCII) keyword so the query
        # string is a valid URL, matching the original hard-coded form.
        baidu_pc = f'https://www.baidu.com/s?wd={quote(self.keyword)}'
        yield scrapy.Request(url=baidu_pc, callback=self.parse)

    def parse(self, response):
        """Print the footer link text of each organic result container.

        NOTE(review): this extracts the *displayed* text of the footer
        anchor, not its @href — presumably the visible site URL Baidu
        shows there; confirm against a live results page.
        """
        li_list = response.xpath('//*[@id="content_left"]/div[@class="result c-container new-pmd"]')
        for li in li_list:
            li_url = li.xpath('.//div[@class="f13 c-gap-top-xsmall se_st_footer user-avatar"]/a/text()').extract_first()
            # extract_first() returns None when the footer anchor is
            # missing; skip instead of printing 'None'.
            if li_url is not None:
                print(li_url)

