import scrapy
from scrapy.linkextractors import LinkExtractor
import math
from lyl.items import LylItem
import time
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.spiders import Spider
import requests


class lylSpider(scrapy.Spider):
    """Spider for computer-book listings on product.china-pub.com.

    Crawl flow:
        parse        -> requests each paginated category listing page
        parse_page   -> extracts per-book detail-page links and follows them
        parse_data   -> scrapes one detail page into a ``LylItem``
    """
    # Unique spider identifier within the Scrapy project
    # (must not clash with the project name).
    name = 'computer'
    # allowed_domains deliberately left unset so requests are not filtered.
    # allowed_domains = ['read.douban.com']
    # Seed URL: the site's product index; listing pages are joined relative to it.
    start_urls = ['http://product.china-pub.com/']

    def parse(self, response):
        """Yield a request for each paginated listing page of category 59-05.

        NOTE(review): the page count (13) is hard-coded; it presumably should
        be scraped from the pager on the index page — confirm before relying
        on it for other categories.
        """
        page_paths = ['/cache/browse2/59/{}_1_59-05_0.html'.format(i)
                      for i in range(1, 14)]
        for path in page_paths:
            # urljoin resolves the relative path against start_urls[0].
            yield scrapy.Request(url=response.urljoin(path),
                                 callback=self.parse_page)

    def parse_page(self, response):
        """Extract every book's detail-page URL from a listing page and follow it."""
        detail_hrefs = response.xpath('//li[@class="result_name"]/a[1]//@href').extract()
        for href in detail_hrefs:
            absolute = response.urljoin(href)
            # Pass the absolute URL via meta so parse_data can store it on the item.
            yield scrapy.Request(url=absolute,
                                 meta={'url': absolute},
                                 callback=self.parse_data)

    def parse_data(self, response):
        """Scrape a single book detail page into a ``LylItem``."""
        item = LylItem()

        # Book title.
        item['bookname'] = response.xpath('//div[@class="pro_book"]//h1/text()').extract_first()

        # Detail-page URL, forwarded from parse_page via request meta.
        item['link'] = response.meta['url']

        # Short description; the default covers a missing node, and the strip()
        # check additionally covers empty/whitespace-only text (the original
        # `== " "` test only caught a single space).
        details = response.xpath('//div[@class="pro_name_intr"]/span/text()').extract_first("暂无简介")
        if not details or not details.strip():
            details = '暂无简介'
        item['details'] = details

        # Fixed metadata expected by the downstream item pipeline.
        item['uploader'] = "wj"
        item['view'] = 0

        yield item

