import os
import json
import urllib.request
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
import scrapy

from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings

class InfoQSpider(scrapy.Spider):
    """Fetch the topic (tag) list from infoq.cn's JSON API.

    The parsed response body is currently dumped to ``test.html`` for
    inspection; item extraction is not yet implemented in this file.
    """
    name = "infoq_tags"
    allowed_domains = ["infoq.cn"]
    start_urls = ['https://www.infoq.cn/public/v1/topic/getList']
    custom_settings = {
        'ITEM_PIPELINES': {'tag_source.infoq.TagPipeline': 301},
        'LOG_LEVEL': 'INFO',
        'COOKIES_ENABLED': True,
    }

    def __init__(self, *args, **kwargs):
        # Cooperate with scrapy.Spider's own initialisation (name, kwargs, ...)
        # instead of silently discarding it.
        super().__init__(*args, **kwargs)
        self.page_count = 0        # number of responses handled so far
        self.total_pages = 654     # fixed typo (was 'totgal_pages'); not read elsewhere in this file

    def start_requests(self):
        # Browser user agent, so the site treats us like a regular client.
        headers = {
            'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36 Edg/90.0.818.41'
        }
        # Session cookies captured from a browser session.
        # NOTE(review): these are hard-coded and will expire; refresh them
        # when requests start failing.
        cookies = {
            'LF_ID': '1596546004203-7769949-7289805',
            'GRID': '8ed2875-174b7fd-6ad9862-31c4b7f',
            'SERVERID': '3431a294a18c59fc8f5805662e2bd51e|1619058525|1619058010'
        }
        urls = [
            'https://www.infoq.cn/public/v1/topic/getList'
        ]
        for url in urls:
            yield scrapy.Request(url=url, headers=headers, cookies=cookies, callback=self.parse)

    def parse(self, response):
        """Dump the raw response body to disk for offline inspection."""
        self.page_count += 1
        # Explicit encoding: response.text may contain non-ASCII characters,
        # and the platform default encoding is not guaranteed to be UTF-8.
        with open('test.html', 'w', encoding='utf-8') as f:
            f.write(response.text)

class TagPipeline(object):
    """Write deduplicated tag items to ``dataset/infoq.tag.json`` as a JSON array.

    Items are deduplicated by their ``'name'`` key; the file is built
    incrementally (one item per line) and closed as a valid JSON array.
    """

    def open_spider(self, spider):
        """Open the output file and write the opening bracket of the array."""
        # Explicit UTF-8: items are serialized with ensure_ascii=False below,
        # so the platform default encoding must not be relied upon.
        self.file = open('dataset/infoq.tag.json', 'w', encoding='utf-8')
        self.file.write('[\n')
        self.count = 0        # items written so far (controls the comma separator)
        self.seen = set()     # tag names already written, for deduplication

    def close_spider(self, spider):
        """Terminate the JSON array and close the file."""
        self.file.write('\n]')
        self.file.close()

    def process_item(self, item, spider):
        """Write *item* unless its name was already seen.

        Always returns the item: Scrapy's pipeline contract requires
        returning the item (or raising DropItem) so that downstream
        pipelines continue to receive it.
        """
        if item['name'] in self.seen:
            return item
        self.seen.add(item['name'])

        # Prefix a comma separator for every entry after the first, so the
        # output stays a valid JSON array.
        words = []
        if self.count > 0:
            words.append(',\n')
        words.append('  ')
        words.append(json.dumps(item, ensure_ascii=False).strip())
        self.file.write(''.join(words))
        self.count += 1
        return item

def fetch():
    """Run the InfoQ tag spider to completion in this process.

    Blocks until the crawl finishes (``CrawlerProcess.start`` runs the
    Twisted reactor). Note: a reactor can only be started once per process.
    """
    # Removed an unused `Settings()` local that was created but never
    # passed to CrawlerProcess; the spider's custom_settings apply anyway.
    process = CrawlerProcess()
    process.crawl(InfoQSpider)
    process.start()