# -*- encoding:utf-8 -*-
import sys
import requests
import re
import traceback
from log.LoggingRecord import LogRec
from config.Config import Config
from dao.MySqlDAL import MySqlDAL
import datetime
from service.stockdebate.DebateFetcher import DebateFetcher
import time
import utils.DateUtil as dateutil
import json
import constant.constant_define as constant_define

# Python 2 hack: site.py deletes sys.setdefaultencoding at startup, so the
# module must be reloaded to restore it.  Setting the default to UTF-8 makes
# implicit str<->unicode conversions (heavily used by this crawler's Chinese
# text handling) decode as UTF-8 instead of raising on the ASCII default.
# NOTE(review): this affects the whole interpreter, not just this module.
reload(sys)
sys.setdefaultencoding('utf-8')
__author__ = 'shudong.msd'


class StockdebateCrawler(object):
    """Crawls stock-debate list pages, filters out entries already stored,
    fetches article bodies concurrently and persists them to MySQL.

    ``type`` codes used throughout this module:
      11/12 -- blog-style list pages (unquoted href attributes),
      21/22 -- "guba" list pages (quoted href attributes).
    """

    # Shared HTTP headers for every list-page request.
    __headers = constant_define.headers

    # Destination table for crawled articles.
    __tablename = 'tb_stock_debate'

    # One DAL instance shared by all crawler instances
    # (constructed once, at class-definition time).
    __sqlUtil = MySqlDAL()

    # Isolates the article-list <div> from the full page before item matching.
    __DIV_PATTERN = re.compile(
        r'''<div class="listBlk">([\s\S]*?)<div class="MainBtm">''')
    # Blog pages emit unquoted attribute values ...
    __ITEM_PATTERN_BLOG = re.compile(
        r'''<li><a href=([\s\S]*?) target=_blank>([\s\S]*?)</a>[\s\S]+?\(([\S\s]*?)\)</font></li>''')
    # ... while guba pages quote them.
    __ITEM_PATTERN_GUBA = re.compile(
        r'''<li><a href="([\s\S]*?)" target="_blank">([\s\S]*?)</a>[\s\S]+?\(([\S\s]*?)\)</font></li>''')

    def set_target(self, target):
        """Set the list-page URL that subsequent crawl calls will fetch."""
        self.__target = target

    def __get_news_link(self, page_type):
        """Download the current target page and return [(href, title, time), ...].

        The page is decoded as gb2312 (the site's declared charset).
        Unknown ``page_type`` values yield an empty list.
        """
        resp = requests.get(self.__target, headers=self.__headers)
        resp.encoding = 'gb2312'
        if page_type in (11, 12):
            return self.__extract_items(resp.text, self.__ITEM_PATTERN_BLOG)
        if page_type in (21, 22):
            return self.__extract_items(resp.text, self.__ITEM_PATTERN_GUBA)
        return []

    def __extract_items(self, content, item_pattern):
        """Return [(href, title, time), ...] parsed from the list <div>.

        Returns an empty list when the page layout does not match (the
        previous implementation raised IndexError here).
        """
        divs = self.__DIV_PATTERN.findall(content)
        if not divs:
            return []
        return item_pattern.findall(divs[0])

    def __filt_repeat_link(self, page_type=0):
        """Fetch the link list and drop entries we already stored.

        Filters against titles persisted within the last 5 days as well
        as duplicate titles within the same page.  For blog-type pages
        (11/12) the query string is stripped from each href.
        Returns a list of mutable [href, title, time] rows.
        """
        raw_links = self.__get_news_link(page_type)

        # Titles already written to the table within the last 5 days.
        filt_sql = 'SELECT title FROM ' + self.__tablename + ' WHERE date > DATE_SUB("' + str(
            datetime.date.today()) + '",INTERVAL 5 DAY);'
        known_titles = set(
            str(item['title'])
            for item in self.__sqlUtil.get_dimensions_rows(filt_sql))

        target_list = list()
        for row in raw_links:
            title = str(row[1].strip())
            if title in known_titles:
                continue
            # Adding here also de-duplicates repeats within this page.
            known_titles.add(title)

            row = list(row)
            tmp_link = row[0].strip()
            # Blog links carry a tracking query string -- drop it.
            if page_type in (11, 12):
                tmp_index = tmp_link.rfind("?")
                if tmp_index != -1:
                    row[0] = tmp_link[:tmp_index]
            target_list.append(row)
        return target_list

    # [['title','content','type','link_from','date','update_time'],[]]
    def writeNewsToDB(self, type=0):
        """Crawl the target page, fetch article bodies and persist them.

        :param type: page-type code (11/12 blog, 21/22 guba).
        :return: number of rows successfully inserted.
        :raises: re-raises any exception after logging its traceback.
        """
        try:
            link_list = self.__filt_repeat_link(type)

            # Fetch article bodies with a small thread pool.
            fetcher = DebateFetcher(threads=10)
            for row in link_list:
                fm_date = dateutil.getFormatDateForDebate(row[2], type)
                fetcher.push({'target': row[0].strip(),
                              'title': row[1].strip(),
                              'date': fm_date,
                              'type': type})

            buffered = list()
            success_record = 0
            while fetcher.task_left():
                res_map = fetcher.pop()
                if res_map:
                    buffered.append(res_map)
                # Flush in batches so the buffer never grows unbounded.
                if len(buffered) > 10:
                    success_record += self.__write_db(buffered)
                    del buffered[:]
                print(fetcher.task_left())
                time.sleep(0.2)

            # Flush whatever is left after the queue drains.
            if len(buffered) > 0:
                success_record += self.__write_db(buffered)

            return success_record
        except Exception:
            # Log the frames of the original traceback, then re-raise with a
            # bare ``raise`` -- unlike ``raise e`` this preserves the original
            # traceback for upstream handlers.
            info = sys.exc_info()
            err_logger = LogRec.get_logger(Config.ERRLOGGER)
            for fname, lineno, function, _text in traceback.extract_tb(info[2]):
                err_logger.error((fname, "line:", lineno, "in", function))
            err_logger.error("** %s: %s" % info[:2])
            raise

    def __write_db(self, data_source):
        """Bulk-insert fetched rows; returns the number of rows inserted.

        Each row in ``data_source`` must supply values for ``data_keys``
        in order (see DebateFetcher output).
        """
        data_keys = ['title', 'content', 'type', 'link_from', 'date', 'update_time']
        return self.__sqlUtil.insert_many(data_keys, data_source, self.__tablename)
