# -*- coding: utf-8 -*-
import scrapy
from myproject.items import leetcode_huaren_Item
from scrapy.http import Request
from scrapy.http import FormRequest
from urllib import parse

class LeetcodeHuarenSpider(scrapy.Spider):
    """Crawl huaren-it.com list pages (p=1..13) and scrape the title and
    first content chunk from each topic detail page.

    Flow: start_requests -> parse_list_page -> parse_details_page.
    """

    name = 'leetcode_huaren'
    allowed_domains = ['huaren-it.com']
    # NOTE(review): effectively unused — start_requests() is overridden
    # below, so Scrapy never schedules start_urls via the default path.
    start_urls = ['http://huaren-it.com/']

    def parse(self, response):
        """Default callback; intentionally a no-op.

        All requests issued by start_requests() carry explicit callbacks,
        so this is never reached in practice.  (The original body called
        ``self.start_requests(self)`` — a TypeError, since the method takes
        only ``self``, and redundant because Scrapy calls start_requests
        automatically at spider startup.)
        """
        pass

    def start_requests(self):
        """Yield one POST (FormRequest) per list page, p=1 through p=13."""
        url = "http://huaren-it.com/"
        for page in range(1, 14):
            yield FormRequest(
                url,
                callback=self.parse_list_page,
                formdata={"p": str(page)},
            )

    def parse_list_page(self, response):
        """Extract each non-sticky topic link from a list page and follow it."""
        for li in response.css('.latesttopics .topicsticky-false'):
            next_url = li.css('.topicrow .rowdetails h3 a::attr(href)').extract_first()
            if next_url:
                # Relative hrefs are resolved against the page URL.
                yield Request(
                    url=parse.urljoin(response.url, next_url),
                    callback=self.parse_details_page,
                )

    def parse_details_page(self, response):
        """Scrape the topic title and first content text node.

        Currently only prints the scraped values; the item pipeline is
        stubbed out below.  (Removed the original's duplicate unguarded
        ``print(content)`` that ran before the emptiness check.)
        """
        title = response.css('.topicshow .topicheading h1::text').extract_first()
        content = response.css('.postcontent::text').extract_first()
        if content:
            # TODO(review): enable item yielding once the Item's field names
            # are confirmed.  The original draft assigned title and content
            # to the SAME key, so title would have been silently overwritten:
            #   item = leetcode_huaren_Item()
            #   item['items_name'] = title    # likely needs a distinct field
            #   item['items_name'] = content  # overwrites the line above
            #   yield item
            print(title)
            print(content)