#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os
import json
import psycopg2
from xtls.basecrawler import BaseCrawler
from bs4 import BeautifulSoup

from xtls.logger import get_logger
from company_crawler_util import get_db_config


# Project root: the parent of this file's directory, added to sys.path so
# sibling packages resolve when this file is executed as a script.
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
if ROOT_PATH not in sys.path:
    sys.path.append(ROOT_PATH)

# NOTE(review): ROOT_PATH is appended to sys.path only *after* the imports at
# the top of the file (e.g. `company_crawler_util`) have already executed --
# confirm those imports resolve without it, or the path tweak is dead code.
logger = get_logger(__file__)
db_config = get_db_config(ROOT_PATH)


# Hexun financial-report category slug -> Chinese display name.
# zcfz = balance sheet (资产负债), lr = income statement (利润),
# xjll = cash-flow statement (现金流量).
CATEGORY = {
    'zcfz': u'资产负债',
    'lr': u'利润',
    'xjll': u'现金流量',
}

class FinancialCrawler(BaseCrawler):
    """Crawl per-date financial-statement pages from stockdata.stock.hexun.com.

    Subclasses are expected to override :meth:`parse` (extract data from one
    page) and :meth:`get_date_list` (enumerate the report dates available for
    a stock); both are no-op stubs here.

    :param stock_list: iterable of stock objects exposing a ``stock_id``
        attribute (shape assumed from usage in :meth:`run` -- confirm).
    :param category: report-category slug interpolated into the URL;
        presumably one of the keys of the module-level ``CATEGORY`` dict
        (not validated here).
    """

    def __init__(self, stock_list, category):
        super(FinancialCrawler, self).__init__()
        self.stock_list = stock_list

        # DB handle kept for the lifetime of the crawler; never closed here.
        self.conn = psycopg2.connect(database=db_config['db'], host=db_config['host'], port=db_config['port'],
                                     user=db_config['user'], password=db_config['pw'])
        self.cur = self.conn.cursor()
        # `url_first` is currently unused by `run`; kept for subclasses/back-compat.
        self.url_first = 'http://stockdata.stock.hexun.com/2009_{category}_{stock_id}.shtml'
        self.url = 'http://stockdata.stock.hexun.com/2008/{category}.aspx?stockid={stock_id}&accountdate={date}'
        self.category = category

    def parse(self, soup):
        """Extract data from one report page. Stub -- override in a subclass.

        :param soup: BeautifulSoup document of a fetched report page.
        """
        pass

    def get_date_list(self, stock_id):
        """Return the report dates available for *stock_id*.

        Stub -- override in a subclass. Must return an iterable of date
        strings suitable for the ``accountdate`` URL parameter.
        """
        pass

    def run(self):
        """Fetch and parse every (stock, date) report page.

        Pages that fail to download or parse are skipped silently.
        """
        # BUG FIX: the original iterated `self.stock_list()` -- calling the
        # sequence stored in __init__, which would raise TypeError for a
        # plain list. Iterate the sequence itself.
        for stock in self.stock_list:
            # Guard against the stub get_date_list() returning None.
            date_list = self.get_date_list(stock.stock_id) or []
            for date in date_list:
                url = self.url.format(category=self.category, stock_id=stock.stock_id, date=date)
                html = self.get(url)
                if not html:
                    continue

                soup = BeautifulSoup(html, 'html5lib')
                if not soup:
                    continue

                self.parse(soup)




# Module-level DB connection/cursor, opened as an import-time side effect and
# never explicitly closed. NOTE(review): this duplicates the per-instance
# connection made in FinancialCrawler.__init__ -- confirm both are needed, and
# consider opening this lazily inside the functions that use it.
conn = psycopg2.connect(database=db_config['db'], host=db_config['host'], port=db_config['port'],
                             user=db_config['user'], password=db_config['pw'])
cur = conn.cursor()


def create_stock_list():
    """Build the list of stocks for the crawler to process.

    BUG FIX: the original definition had an empty body, which is a
    SyntaxError -- the module could not even be imported. A no-op stub body
    is supplied so the file parses; the function still returns ``None``.

    TODO: implement -- presumably a SELECT via the module-level ``cur`` that
    yields objects with a ``stock_id`` attribute (confirm against callers).
    """
    pass

# Script entry point -- intentionally a no-op for now; crawler wiring
# (building the stock list and running FinancialCrawler) is still TODO.
if __name__ == '__main__':
    pass

