# -*- encoding:utf-8 -*-
import re
import sys
import traceback
import requests
from service.base.Fetcher import Fetcher
from log.LoggingRecord import LogRec
from config.Config import Config
import time

__author__ = 'shudong.msd'


class NewsFetcher(Fetcher):
    """Fetch a news page and extract its article body.

    Each ``filt_*`` method pulls the article text out of one specific
    site layout with a regular expression, then normalizes the result
    through the shared regex/replace rules supplied by the ``Fetcher``
    base class (``get_regex_rules`` / ``get_replace_rules``).
    """

    # 返回 ('title','content','type','link_from','click_num','date','update_time')
    def do_work(self, req):
        """Fetch ``req['target']`` and return the extracted page tuple.

        ``req['style']`` selects the site-specific extraction:
          0 - finance (gbk), 1 - licai, 2 - blog (utf-8), 3 - guba (gbk).

        Returns a 7-tuple: (title, content, type, link_from, click_num,
        date, update_time) where update_time is ``time.time()``.
        Re-raises any exception after logging its traceback.
        """
        try:
            resp = requests.get(req['target'], headers=self.get_headers())
            pageinfo = ''

            style = req['style']
            if style == 0:
                resp.encoding = 'gbk'
                pageinfo = self.filt_finance_content(resp.text)
            elif style == 1:
                pageinfo = self.filt_licai_content(resp.text)
            elif style == 2:
                resp.encoding = 'utf-8'
                pageinfo = self.filt_blog_content(resp.text)
            elif style == 3:
                resp.encoding = 'gbk'
                pageinfo = self.filt_guba_content(resp.text)

            return (req['title'], pageinfo, req['type'], req['target'],
                    req['click_num'], req['date'], time.time())
        except Exception:
            self._log_error(sys.exc_info())
            # Bare raise keeps the original exception type and traceback
            # (the old ``raise e`` reset the traceback in Python 2).
            raise

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------

    def _log_error(self, info):
        """Write the traceback in *info* (a ``sys.exc_info()`` tuple) to the error log."""
        err_logger = LogRec.get_logger(Config.ERRLOGGER)
        for filename, lineno, function, text in traceback.extract_tb(info[2]):
            # Format as a string; the previous code logged a raw tuple.
            err_logger.error("%s line: %s in %s" % (filename, lineno, function))
        err_logger.error("** %s: %s" % info[:2])

    def _apply_rules(self, text):
        """Run the shared compiled-regex rules and literal replace rules over *text*."""
        for pattern, replacement in self.get_regex_rules().items():
            for matched in pattern.findall(text):
                text = text.replace(matched, replacement)
        for junk in self.get_replace_rules():
            text = text.replace(junk, "")
        return text

    def _extract(self, content, pattern):
        """Return the first *pattern* capture in *content*, cleaned; '' when absent."""
        matches = re.findall(pattern, content)
        if not matches:
            return ""
        return self._apply_rules(matches[0])

    # ------------------------------------------------------------------
    # per-site extraction
    # ------------------------------------------------------------------

    # 过滤文章内容 (filter article content)
    def filt_finance_content(self, content):
        """Extract the finance-article body; falls back to the rank layout."""
        res_info = self._extract(
            content,
            r'''<!--wapdump end-->([\s\S]*?)<!-- publish_helper_end -->''')
        if res_info.strip() == '':
            res_info = self.filt_finance_content_rank(content)
        return res_info

    # 过滤汇总排行榜部分的finance (ranking-page finance layout)
    def filt_finance_content_rank(self, content):
        """Extract the ranking-page finance layout; falls back to variant 2."""
        res_info = self._extract(
            content,
            r'''<!-- publish_helper[\s\S]*?-->([\s\S]*?)<!-- publish_helper_end -->''')
        if res_info.strip() == '':
            res_info = self.filt_finance_content_content2(content)
        return res_info

    def filt_finance_content_content2(self, content):
        """Extract the third finance layout variant; '' when not matched."""
        return self._extract(
            content,
            r'''<!--wapdump end-->([\s\S]*?)<!-- news_keyword_pub''')

    def filt_licai_content(self, content):
        """Extract the licai article body; '' when not matched."""
        return self._extract(
            content,
            r'''<div class="p_article">([\s\S]*?)</div>''')

    def filt_blog_content(self, content):
        """Extract the blog body between the body markers; falls back to variant 2."""
        res_info = self._extract(
            content,
            u'''<!-- 正文开始 -->([\\s\\S]*?)<!-- 正文结束 -->''')
        # NOTE: the original deliberately compared without .strip() here,
        # unlike the finance methods; preserved as-is.
        if res_info == '':
            res_info = self.filt_blog_content_2(content)
        return res_info

    def filt_blog_content_2(self, content):
        """Extract the blog body from the content-area markers; '' when not matched."""
        return self._extract(
            content,
            u'''<!-- 内容区 -->([\\s\\S]*?)<!--/内容区-->''')

    def filt_guba_content(self, content):
        """Extract the guba post body; '' when not matched."""
        return self._extract(
            content,
            r'''<div class='ilt_p'([\s\S]*?)<div class='ilt_panel clearfix'>''')