#!/usr/bin/env python
# -*- coding: utf-8 -*-
# For sister Deng Hong
# Description: crawler process
# Date: 2018-04-15

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options

from . import brwutil
from . import db
from . import db2xls

# Reference-record class: holds the bibliographic data for one crawled item


class ReferenceItem():
    """A bibliographic reference record (one crawled item).

    Every field is normalized to a plain string: arguments passed as
    None are replaced with the empty string so downstream DB/Excel code
    can treat all fields uniformly.
    """

    def __init__(self, title=None, year=None, authors=None, keywords=None,
                 journal=None, issue=None, issn=None, source=None,
                 abstract=None, institutions=None, category=None, url=None,
                 doi=None):
        # BUG FIX: the original wrote `x == None if "" else x`, which parses
        # as `(x == None) if ("") else x`; since "" is falsy it always kept
        # the raw argument (None included), so the None -> "" substitution
        # never happened. The intended form is `"" if x is None else x`.
        self.title = "" if title is None else title
        self.year = "" if year is None else year
        self.authors = "" if authors is None else authors
        self.keywords = "" if keywords is None else keywords
        self.journal = "" if journal is None else journal
        self.issue = "" if issue is None else issue
        self.issn = "" if issn is None else issn
        self.source = "" if source is None else source
        self.abstract = "" if abstract is None else abstract
        self.institutions = "" if institutions is None else institutions
        self.category = "" if category is None else category
        self.url = "" if url is None else url
        self.doi = "" if doi is None else doi

    def __str__(self):
        # BUG FIX: __str__ must return str in Python 3; the original
        # returned item_text.encode("gbk", "ignore") -> bytes, making
        # str(item) / print(item) raise TypeError. Encode at the output
        # boundary (e.g. when writing to a GBK console/file) instead.
        return "{title:%s \nyear: %s\nurl: %s\ndoi: %s\n}" % (
            self.title, self.year, self.url, self.doi)


# Crawler base class: every crawler should inherit from it and override
# these three methods:
# get_page_count - return the number of listing pages
# get_items_urls - return all item URLs on the given listing page
# get_item - fetch the ReferenceItem object for a specific URL
class Crawler():
    """Base class for all crawlers.

    Subclasses must override exactly three methods:
      - get_page_count()          -> number of listing pages to crawl
      - get_items_urls(index)     -> item URLs found on listing page `index`
      - get_item(url)             -> ReferenceItem scraped from `url`
    """

    def __init__(self, source, db_path):
        self.__closed__ = False  # guards close() against double-closing
        self.browser = brwutil.build_browser()
        self.export2excel = True  # dump the DB to Excel after a run
        self.source = source
        self.db_path = db_path
        self.conn = db.ReferenceDAO(self.source, self.db_path)

    def export_excel(self):
        """Export the reference database at self.db_path to Excel."""
        db2xls.main(self.db_path)

    def close(self):
        """Close the database connection; safe to call more than once."""
        if not self.__closed__:
            self.conn.close()
            self.__closed__ = True

    def item_exists(self, item):
        """Return True when an equal record is already in the database."""
        return self.conn.exists(item)

    def store_item(self, item):
        """Insert `item` unless it is already stored (then just report it)."""
        if not self.item_exists(item):
            self.conn.add(item)
        else:
            print("%s exists" % (item.title))

    # override in subclasses
    def get_page_count(self):
        return 0

    # override in subclasses
    def get_items_urls(self, page_index):
        return []

    # override in subclasses
    def get_item(self, url):
        return ReferenceItem()

    # Run the crawl. Do not override this method.
    # auto_close_db: close the database automatically when finished;
    # when False the caller must invoke close() manually.
    def start(self, auto_close_db):
        self.page_count = self.get_page_count()
        stored = 0
        for page_index in range(self.page_count):
            for url in self.get_items_urls(page_index):
                ref_item = self.get_item(url)
                # BUG FIX (idiom): `== None` goes through __eq__ and can
                # misfire on objects with custom equality; identity test
                # is the correct None check.
                if ref_item is None:
                    continue
                self.store_item(ref_item)
                stored += 1
                # periodic commit so a crash loses at most ~100 items
                if stored % 100 == 0:
                    self.conn.commit()

        self.conn.commit()
        if auto_close_db:
            self.close()

        if self.export2excel:
            self.export_excel()

        print("%s process successful" % (self.source))
