# -*- coding:utf-8 -*-
from scrapy.spiders import CrawlSpider, Request, Rule
from yhdSpiders.items import ListItem, DetailsItem
from scrapy.linkextractors import LinkExtractor
import os

class YhdSpider(CrawlSpider):
    """Spider for yhd.com: reads category-list URLs from a local text file,
    requests each list page, and follows product-detail links (numeric
    ``/<id>.html`` pages) extracted by the crawl rules.
    """
    name = 'yhd'
    allowed_domains = ['yhd.com']

    # Rule for extracting detail-page links; each match is parsed by
    # parse_imdb and its own links are followed further.
    rules = (
        Rule(LinkExtractor(allow=r"/\d+\.html$"), callback="parse_imdb", follow=True),
    )

    # Custom request headers.
    # NOTE(review): the Cookie value is a captured session (JSESSIONID etc.)
    # and will expire — confirm it is still required / refresh it.
    header = {
        'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie': 'cart_num=0; cart_cookie_uuid=7cc5c4d8-f5e3-431d-915e-1b2391fd178c; cid=NXFDODM0NmlUODU0N29QNzM4OHRHOTcwMWNPNzcwMmtHMzk2M29LMDUyNHRGNTU4; unpl=V2_bzNtbRZTQBUmARMEck1cUGJTQAkRA0ccdwAVAywYWgYwBkFaclRCFXwUR1FnGVsUZgsZXUpcQhNFCEdkfiksBWYCEV9HVkUdMFwEEUspXwVXCyJYQ1BBFH0KR1RLKVs1V15KAhgSF0csZkZkfBxbAGICE1RyVnMURUMoVTYcXQJlAhpfQ1dzFEUL; ipLoc-djd=2-2817-51973-0; provinceId=12; cityId=925; yhd_location=12_925_4305_0; __jdv=259140492|baidu|-|organic|not set|1524530075149; shshshfp=5fa53b952594250ee9bb22fd6055b116; shshshfpa=1a8dc05b-8d78-1557-6aec-b44d38121c82-1524564051; shshshfpb=0e4edc59ba3bc275910355cf823834106ad73b346c75fb1c35adf004eb; __jda=81617359.1524461900417434766315.1524461900.1524562951.1524616676.8; __jdc=81617359; __jdb=81617359.18.1524461900417434766315|8.1524616676; JSESSIONID=6C998D975637D5C8ECD61CDAC53F6851.s1',
        'Host': 'search.yhd.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'
    }

    def start_requests(self):
        """Override Scrapy's default entry point: build the start requests
        from a file of category URLs instead of ``start_urls``.

        Yields one ``Request`` per non-blank line of ``data/newMuyin.txt``.
        """
        part_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        # os.path.join keeps the path portable (the original hard-coded
        # Windows '\' separators, which only worked on Windows by accident
        # since '\d' is not an escape sequence).
        data_file = os.path.join(part_path, 'data', 'newMuyin.txt')
        with open(data_file, encoding='utf-8') as file:
            for url in file:
                link = url.strip()
                if not link:
                    # Skip blank lines so they don't become empty requests.
                    continue
                print('当前解析的分类url为：%s' % link)
                # Per-request copy of the headers: the original mutated the
                # shared class-level dict, so one request's Referer could
                # leak into another's.
                headers = dict(self.header, Referer=link)
                # No explicit callback: CrawlSpider's own parse() must
                # handle the response, otherwise the link-extraction rules
                # never run (the Scrapy docs warn against overriding or
                # targeting parse on a CrawlSpider). Start responses are
                # routed to parse_start_url below.
                yield Request(url=link, headers=headers)

    def parse_start_url(self, response):
        """CrawlSpider hook for start-request responses.

        Replaces the original ``parse()`` override, which silently disabled
        rule processing. Currently just dumps the page for debugging.
        """
        print(response.text)
        # Must return an iterable of items/requests; nothing to emit here.
        return []

    def parse_imdb(self, response):
        """Callback for detail-page links matched by the crawl rules:
        emit a DetailsItem carrying the page URL.
        """
        item = DetailsItem()
        item['url'] = response.url
        # Title extraction kept from the original, still to be enabled:
        # item['title'] = "".join(response.xpath('//*[@class="fk-3"]/div[@class="hdd"]/h3/text()').extract())
        yield item