# -*- coding: utf-8 -*-
import json

import scrapy
from scrapy.spiders import CrawlSpider, Rule
from ..items import OpencourseItem, _DBConf, loadConf
from scrapy.linkextractors import LinkExtractor
import pymongo
from scrapy.http import Request
from scrapy.exceptions import CloseSpider

class CourseSpider(CrawlSpider):
    """Spider that crawls the Chinese (/zh/) pages of forextime.com.cn and
    yields one OpencourseItem per successfully parsed page.

    Optionally clears previously scraped data for this spider from MongoDB
    before the crawl starts (controlled by the ``trash_data`` argument).
    """
    name = "ForeXTime"
    allowed_domains = ["forextime.com.cn"]
    start_urls = (
        'https://www.forextime.com.cn/',
    )
    rules = [
        Rule(LinkExtractor(allow=('/zh/(.*?)'),
                           allow_domains=("forextime.com.cn"),
                           deny_domains=("forextime.com")),
                           callback='parse_item',
                           follow=False)
    ]
    # Count of pages scraped so far in this run; compared against limit_count.
    i = 0

    def __init__(self, storeConf=json.dumps(_DBConf), limit_count=0, trash_data=True, *a, **kw):
        """Initialize the spider and optionally wipe this spider's old data.

        :param storeConf: JSON string with MongoDB connection settings;
            merged over the ``_DBConf`` defaults via ``loadConf``.
        :param limit_count: stop the crawl after this many pages; 0 = no limit.
        :param trash_data: when truthy, delete previously stored documents
            for this spider (``college == self.name``) before crawling.
        """
        super().__init__(*a, **kw)
        # Scrapy CLI arguments (-a trash_data=false) arrive as strings, and
        # bool() is True for ANY non-empty string (even "False"), which would
        # wipe the data unconditionally. Parse common falsy spellings.
        if isinstance(trash_data, str):
            trash_data = trash_data.strip().lower() not in ("", "0", "false", "no")
        else:
            trash_data = bool(trash_data)
        self.limit_count = int(limit_count)
        # Lazy %-style args: the message is only formatted if the level is on.
        self.logger.info("开始爬虫：")
        self.logger.info("参数信息: %s", storeConf)
        self.logger.info("额外参数: limit_count  %s", limit_count)
        self.logger.info("额外参数: trash_data  %s", trash_data)
        if trash_data:
            DBConf = loadConf(json.loads(storeConf), _DBConf)
            # Context manager closes the client after this one-off cleanup
            # instead of leaking the connection for the spider's lifetime.
            with pymongo.MongoClient(DBConf["MONGODB_URI"]) as client:
                collection = client.get_database(
                    DBConf['MONGODB_DATABASE']).get_collection(DBConf['MONGODB_COLLECTION'])
                self.logger.info("正在清除数据：。。。\n")
                # Collection.remove() was removed in pymongo 4;
                # delete_many is the supported replacement.
                result = collection.delete_many(dict(college=self.name))
                self.logger.info("已删除 %s 条记录", result.deleted_count)

    def parse_item(self, response):
        """Extract <title> and body text from a crawled page and yield an item.

        Raises CloseSpider once ``limit_count`` pages have been scraped
        (``limit_count == 0`` means unlimited).
        """
        # Equivalent to the chained comparison `i >= limit_count != 0`,
        # spelled out for readability. CloseSpider alone shuts the crawl
        # down; the old extra self.close(CourseSpider, ...) call passed the
        # class where an instance was expected and was redundant.
        if self.limit_count != 0 and self.i >= self.limit_count:
            raise CloseSpider("已抓取完成")
        error = ""
        try:
            # response.xpath() is the Selector shortcut; string(.) flattens
            # the whole <body> subtree into its text content.
            bodyHtml = response.xpath("//body").xpath("string(.)").extract()[0]
            title = response.xpath("//title/text()").extract()[0]
        except IndexError as e:
            # extract()[0] raises IndexError when the node is missing; keep
            # the message for the failure log below. (Narrowed from the old
            # blanket `except Exception`.)
            bodyHtml = ""
            title = ""
            error = str(e)
        if bodyHtml != "" and title != "":
            item = OpencourseItem()
            item['html'] = bodyHtml
            item['title'] = title
            item['url'] = response.url
            item['college'] = self.name
            self.i += 1
            yield item
            # Route progress through the spider logger instead of print().
            self.logger.info("%s 网页抓取，第%s个页面，抓取成功", self.name, self.i)
        else:
            self.logger.info("%s 网页抓取失败,错误原因:%s", self.name, error)