import scrapy


class ExampleSpider(scrapy.Spider):
    """Spider for chinadaily.com.cn section pages.

    ``parse`` appends sufficiently long anchor-text snippets to a local
    cache file; ``geturl`` accumulates protocol-relative, non-article
    links into ``myUrl`` (pre-formatted for pasting into ``start_urls``).
    """

    name = 'example'
    # allowed_domains = ['chinadaily.com.cn']
    start_urls = ['http://watchthis.chinadaily.com.cn',
                  'http://www.chinadaily.com.cn/world/america',
                  'http://www.chinadaily.com.cn/business/full_coverage/top1lists',
                  'http://www.chinadaily.com.cn/travel',
                  'http://www.chinadaily.com.cn/business/money',
                  'http://www.chinadaily.com.cn/sports/59b8d012a3108c54ed7dfc72',
                  'http://www.chinadaily.com.cn/world',
                  'http://www.chinadaily.com.cn/life/fashion/trendwatch',
                  'http://www.chinadaily.com.cn/business/biz_industries',
                  'http://www.chinadaily.com.cn/culture',
                  'http://www.chinadaily.com.cn/business/businessphotos',
                  'http://www.chinadaily.com.cn/culture/eventandfestival',
                  'http://www.chinadaily.com.cn/travel/citytours',
                  'http://www.chinadaily.com.cn/world/asia_pacific',
                  'http://www.chinadaily.com.cn/opinion/op-ed',
                  'http://www.chinadaily.com.cn/travel/news',
                  'http://www.chinadaily.com.cn',
                  'http://www.chinadaily.com.cn/world/middle_east',
                  'http://www.chinadaily.com.cn/china',
                  'http://www.chinadaily.com.cn/world/europe',
                  'http://www.chinadaily.com.cn/opinion/weeklysoundbites',
                  'http://www.chinadaily.com.cn/life/celebrity',
                  'http://www.chinadaily.com.cn/travel/footprint',
                  'http://www.chinadaily.com.cn/life/photo',
                  'http://www.chinadaily.com.cn/business/motoring',
                  'http://www.chinadaily.com.cn/world/china-us',
                  'http://www.chinadaily.com.cn/world/601a008ea31024ad0baa6e91',
                  'http://www.chinadaily.com.cn/regional',
                  'http://www.chinadaily.com.cn/sports/swimming',
                  'http://www.chinadaily.com.cn/culture/culturalexchange',
                  'http://www.chinadaily.com.cn/world/cn_eu',
                  'http://www.chinadaily.com.cn/opinion/opinionline',
                  'http://www.chinadaily.com.cn/business/chinadata',
                  'http://www.chinadaily.com.cn/sports/golf',
                  'http://www.chinadaily.com.cn/business/bizvideo',
                  'http://www.chinadaily.com.cn/opinion/forumtrends',
                  'http://www.chinadaily.com.cn/opinion/commentator',
                  'http://www.chinadaily.com.cn/culture/filmandtv',
                  'http://www.chinadaily.com.cn/life/video',
                  'http://www.chinadaily.com.cn/life/people',
                  'http://www.chinadaily.com.cn/travel/video',
                  'http://www.chinadaily.com.cn/sports/soccer',
                  'http://www.chinadaily.com.cn/life/health',
                  'http://www.chinadaily.com.cn/opinion',
                  'http://www.chinadaily.com.cn/business/companies',
                  'http://www.chinadaily.com.cn/travel/aroundworld',
                  'http://www.chinadaily.com.cn/travel/59b8d013a3108c54ed7dfca3',
                  'http://www.chinadaily.com.cn/business/tech',
                  'http://www.chinadaily.com.cn/culture/video',
                  'http://www.chinadaily.com.cn/sports/5a12a5d9a310f9b1cd619a19',
                  'http://www.chinadaily.com.cn/culture/musicandtheater',
                  'http://www.chinadaily.com.cn/sports/volleyball',
                  'http://epaper.chinadaily.com.cn/china',
                  'http://www.chinadaily.com.cn/life',
                  'http://www.chinadaily.com.cn/culture/photo',
                  'http://www.chinadaily.com.cn/life/fashion',
                  'http://www.chinadaily.com.cn/food',
                  'http://www.chinadaily.com.cn/opinion/readers',
                  'http://www.chinadaily.com.cn/travel/photo',
                  'http://www.chinadaily.com.cn/opinion/fromthechinesepress',
                  'http://www.chinadaily.com.cn/business/economy',
                  'http://www.chinadaily.com.cn/world/africa',
                  'http://www.chinadaily.com.cn/sports/china',
                  'http://www.chinadaily.com.cn/opinion/cartoon-index',
                  'http://www.chinadaily.com.cn/culture/heritage',
                  'http://www.chinadaily.com.cn/sports/basketball',
                  'http://www.chinadaily.com.cn/culture/art',
                  'http://www.chinadaily.com.cn/sports',
                  'http://www.chinadaily.com.cn/opinion/editionals',
                  'http://www.chinadaily.com.cn/world/China-Japan-Relations',
                  'http://www.chinadaily.com.cn/world/china-africa',
                  'http://www.chinadaily.com.cn/business',
                  'http://www.chinadaily.com.cn/sports/tennis',
                  'http://www.chinadaily.com.cn/opinion/columnists',
                  'http://www.chinadaily.com.cn/sports/stars',
                  'http://www.chinadaily.com.cn/culture/books',
                  'http://www.chinadaily.com.cn/travel/guidesandtips',
                  ]

    # Scraped-data store (declared but not yet populated in this module).
    # NOTE: class-level mutable — shared across all instances of this spider.
    record = {}

    # Accumulated candidate section URLs (deduplicated via set).
    myUrl = set()

    def parse(self, response):
        """Append long anchor-text snippets from *response* to the cache file.

        Extracts all text nodes under ``//div//a``; snippets with more than
        5 space-separated tokens are kept (shorter ones are assumed to be
        navigation labels), stripped, and written one per line.

        :param response: scrapy Response for a crawled page.
        """
        # Crude but effective: grab every bit of link text on the page.
        data = response.xpath("//div//a//text()").extract()

        # Raw string literal: the original non-raw form depended on '\B',
        # '\s', etc. not being recognized escapes — deprecated and slated
        # to become a SyntaxError in future Python versions.
        path = r'E:\BianYiQu\AnalysisSys\src\ScrapyFrame\myScrapy\cache\cache.txt'

        with open(path, 'a', encoding='utf-8') as fp:
            for item in data:
                text = item.strip()
                # Skip short snippets: 5 or fewer space-separated tokens.
                if len(text.split(' ')) <= 5:
                    continue

                # self.save(item)
                fp.write(text)
                fp.write('\n')

    def geturl(self, response):
        """Collect section-index links from *response* and print the set.

        Keeps protocol-relative hrefs (``//...``) that do NOT end in
        ``html`` (i.e. section pages rather than individual articles),
        stored pre-quoted/comma-suffixed for pasting into ``start_urls``.

        :param response: scrapy Response for a crawled page.
        """
        urls = response.xpath("//div//ul//li//a/@href").extract()
        for url in urls:
            if url.startswith("//") and not url.endswith("html"):
                self.myUrl.add("'http:" + url + "',")

        for url in self.myUrl:
            print(url)
