# -*- coding: utf-8 -*-
import scrapy
import pymysql
import time
import re
import urllib.request
from scrapy.http import Request, FormRequest
from zhihu.items import ZhihuItem, PaperItem
from scrapy.linkextractors import LinkExtractor
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


class PaperSpider(scrapy.Spider):
    """Crawl article links under each second-level Zhihu topic.

    Topic URLs are loaded from the MySQL table ``topic2nd`` (filled by an
    earlier spider).  For every topic page this spider extracts two kinds of
    article links and yields one ``PaperItem`` per link:

    * Q&A answers   — URLs matching ``/question/.+/answer/`` (p_type "QA")
    * column posts  — URLs matching ``//zhuanlan.+/p/``       (p_type "Zhualan")
    """

    name = 'paper'
    allowed_domains = ['zhihu.com']
    # start_urls = ['http://zhihu.com/']

    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
    # Second-level topics loaded from the database by get_tp2nd_list().
    # NOTE(review): class-level mutable list — shared across all instances of
    # this spider; fine for a single-run crawl, but repeated runs in one
    # process would accumulate entries.  Confirm before reusing the class.
    item_list_tp2nd = []
    # Limit how many topics are crawled concurrently; currently handled by the
    # per-request delay in start_requests() instead of an explicit cap.

    # Extracts the numeric topic id from a topic URL.  Compiled once here
    # (raw string — '\d' in a plain string literal is a DeprecationWarning
    # on modern Python) instead of on every parse() call.
    _TOPIC_ID_RE = re.compile(r'topic.(\d{1,}).*?')

    def start_requests(self):
        """Yield one request per second-level topic loaded from MySQL.

        Requests share a cookie jar (``meta={"cookiejar": 1}``) and a
        browser-like User-Agent; a 1 s sleep between requests is a crude
        throttle (Scrapy's DOWNLOAD_DELAY setting would be the idiomatic
        alternative, but this preserves the original pacing).
        """
        self.get_tp2nd_list()
        for cnt, item in enumerate(self.item_list_tp2nd, start=1):
            name = item["name"]
            url = item["url"]
            print("开始请求第【"+str(cnt)+"】条二级话题【"+str(name)+"】下的文章信息 ......")
            yield Request(url, meta={"cookiejar": 1}, headers=self.headers, callback=self.parse)
            time.sleep(1)

    def parse(self, response):
        """Extract Q&A and column-article links from one topic page.

        Yields a ``PaperItem`` (title, url, p_type) for every link found.
        Column items titled "侵权举报" (an infringement-report boilerplate
        link) are skipped.
        """
        # Recover the topic id from the request URL and the topic title from
        # the page heading; bail out early if either is missing.
        id_tp2nd = self._TOPIC_ID_RE.findall(response.url)
        name_tp2nd = response.xpath("//h1[@class='TopicCard-titleText']/text()").extract()

        if not (id_tp2nd and name_tp2nd):
            print("\t提取话题名称和 id 出错 ")
            return

        print("\t正在爬取二级话题【"+str(name_tp2nd[0])+"】下的内容，话题 id 为 "+str(id_tp2nd[0])+" ......")
        # Q&A answer links
        links_qa = LinkExtractor(allow='/question/.+/answer/').extract_links(response)
        # Column ("zhuanlan") article links
        links_zl = LinkExtractor(allow='//zhuanlan.+/p/').extract_links(response)

        if links_qa:
            print("\t\t共爬取到【"+str(len(links_qa))+"】项问答类文章，正在存入 ......")
            for link in links_qa:
                item = PaperItem()
                item["title"] = link.text
                item["url"] = link.url
                item["p_type"] = "QA"
                yield item
        else:
            print("\t\t没有爬取到问答类文章")

        if links_zl:
            print("\t\t共爬取到【"+str(len(links_zl))+"】项专栏文章，正在存入 ......")
            for link in links_zl:
                item = PaperItem()
                item["title"] = link.text
                item["url"] = link.url
                # Keep the original (typo'd) value "Zhualan": stored rows and
                # any downstream consumers may already rely on this spelling.
                item["p_type"] = "Zhualan"
                if item["title"] != "侵权举报":
                    yield item
        else:
            print("\t\t没有爬取到专栏文章")

    def get_tp2nd_list(self):
        """Load all second-level topics from MySQL into ``item_list_tp2nd``.

        Each row of ``topic2nd`` is expected to be (id, name, url); one
        ``ZhihuItem`` is appended per row.
        """
        db = self.settings.get('MYSQL_DB_NAME', 'scrapy_default')
        host = self.settings.get('MYSQL_HOST', 'localhost')
        port = self.settings.get('MYSQL_PORT', 3306)
        user = self.settings.get('MYSQL_USER', 'root')
        passwd = self.settings.get('MYSQL_PASSWORD', 'root')

        # charset must be "utf8" (not "utf-8") to match the table's charset,
        # otherwise pymysql fails with a missing-encode error.
        self.db_conn = pymysql.connect(host=host, port=port, db=db, user=user, password=passwd, charset="utf8")
        try:
            self.db_cur = self.db_conn.cursor()
            try:
                self.db_cur.execute('SELECT * FROM topic2nd')
                # fetchall() returns a tuple of row tuples.
                rows = self.db_cur.fetchall()

                print("正在从数据库中获取二级话题列表，共有【" + str(len(rows)) + "】条数据")
                for row in rows:
                    item = ZhihuItem()
                    item["id"] = row[0]
                    item["name"] = row[1]
                    item["url"] = row[2]
                    self.item_list_tp2nd.append(item)
            finally:
                self.db_cur.close()
        finally:
            # Bug fix: the connection itself was never closed before (only
            # the cursor), leaking a MySQL connection per call — and nothing
            # was closed at all if the query raised.
            self.db_conn.close()
