# -*- coding: utf-8 -*-
import scrapy
from scrapy_news.items import ScrapyNewsItem
import copy
# from urllib.parse import urljoin
import urllib
import urllib2
import urlparse


class ToutiaoSpider(scrapy.Spider):
    """Crawl toutiao.com: front page -> category listing pages -> article
    detail pages, yielding one ScrapyNewsItem per article.

    NOTE(review): ``parse_datail`` is a typo for ``parse_detail``; the name
    is kept unchanged so any external reference to the callback still works.
    """
    name = 'toutiao'
    allowed_domains = ['toutiao.com', 'www.toutiao.com']
    start_urls = ['https://www.toutiao.com/']

    def parse(self, response):
        """Front page: extract each top-level category (name + listing URL)
        and schedule its listing page for :meth:`parse_list`.
        """
        self.logger.debug(response.body)
        # Top-level category menu entries.
        categories = response.xpath("/html/body/div/div[2]/div[1]/div/div/ul")
        for cat in categories:
            # A fresh item per category replaces the original pattern of one
            # shared item plus copy.deepcopy before every request.
            item = ScrapyNewsItem()
            # Top-level category name.
            item['c1title'] = cat.xpath("./a/span/text()").extract_first()
            # URL of this category's listing page.
            item['listurl'] = cat.xpath("./a/@href").extract_first()
            if not item['listurl']:
                # Menu entry without a link: nothing to schedule.
                continue
            self.logger.debug("category item: %r", item)
            yield scrapy.Request(
                item['listurl'],
                callback=self.parse_list,
                meta={"item": item},
            )

    # Listing page for one category; contains the category's news entries.
    def parse_list(self, response):
        """Listing page: schedule every article entry that carries a
        thumbnail image for :meth:`parse_datail`.
        """
        base_item = response.meta['item']
        newslist = response.xpath("/html/body/div/div[2]/div[2]/div[2]/ul")
        for news in newslist:
            # Only entries with a thumbnail image are treated as articles,
            # mirroring the original `if img:` filter.
            if not news.xpath("./div/div[1]/a/img"):
                continue
            title_link = news.xpath(".//div[@class='article_title_click']/a")
            newsurl = title_link.xpath("./@href").extract_first()
            if not newsurl:
                # No href: cannot build a Request.
                continue
            # Each scheduled request gets its own copy so concurrently
            # running callbacks never observe later mutations.
            item = copy.deepcopy(base_item)
            # response.urljoin resolves relative hrefs against this page's
            # URL (replaces the Python-2-only urlparse.urljoin call).
            item['newsurl'] = response.urljoin(newsurl)
            item['ll_title'] = title_link.xpath("./text()").extract_first()
            yield scrapy.Request(
                item['newsurl'],
                callback=self.parse_datail,
                meta={"item": item},
            )

    # Article detail page.
    def parse_datail(self, response):
        """Detail page: fill in the contact/company fields and yield the
        completed item.

        NOTE(review): these XPaths ('product-get-info-left-top-item',
        registered capital / contact / phone / address) look copied from a
        product-catalog site rather than toutiao.com — verify against the
        real pages.
        """
        item = response.meta['item']
        companymsgdiv = response.xpath(
            "//div[@class='product-get-info-left-top-item']")
        # The <p> children hold the data positionally: registered capital,
        # contact person, landline, mobile phone, company address.
        cmdplist = companymsgdiv.xpath("./p")
        keys = ('registercapital', 'contact', 'tel', 'phone', 'address')
        for idx, key in enumerate(keys):
            # Index guard prevents the IndexError the original raised
            # whenever the page had fewer than five <p> elements; missing
            # fields become None, matching extract_first()'s convention.
            if idx < len(cmdplist):
                item[key] = cmdplist[idx].xpath("text()").extract_first()
            else:
                item[key] = None
        yield item







