# -*- coding: utf-8 -*-
'''
@author: olivia.dou
Created on: 2019/4/4 20:28
desc:
注：因ES server在产品环境只能由产品环境的服务器访问，因此采用mysql查询
        但是使用mysql查询存在部分快讯因不存在公司标签而在ES查询中没有返回结果
'''
import itertools
import json
import logging
import random
import time
import unittest

import pandas as pd
import requests

from base import ddt
from base.get_config import get_url_dict
from proj_spec.knews.load_industry import get_level2_industries, get_level2_industry
from proj_spec.knews.misc import has_intersection_with_dict_knews_labels
from util.db_util import execute_query
from util.string_util import encode_url_parameter

# Pick one random level-2 industry to parameterize the paging test below.
industries = get_level2_industries()
industry = random.choice(industries)  # still fails loudly if the industry list is empty

@unittest.skip("debug")
@ddt.ddt
class TestKnews(unittest.TestCase):
    """End-to-end checks for the knews search API (/knews/query/v2/list).

    Expected values are fetched from MySQL rather than ES because the
    production ES server is only reachable from production hosts.  Note
    that some flash-news rows lack a company tag and therefore never
    appear in the ES-backed search results.
    """

    def setUp(self):
        # Search endpoint, auth header, and the paragraph-weight algorithm service.
        self.search_root_url = get_url_dict()['url1'] + '/knews/query/v2/list'
        self.search_headers = {"userEmail": "test@123.com"}
        self.para_weight_url = get_url_dict()['url2'] + '/algorithm/process/para_weight'

    #@unittest.skip("debug other tests")
    @ddt.data(['&keyword=营业收入&&增长', 20, 20], ['&industry=%s' % industry, 10, 5])
    @ddt.unpack
    #@ddt.data('&industry=医疗保健设备与服务')
    def test_page(self, rela_url, p_limit1, p_limit2):
        """Paging: search condition {0}, first-page limit {1}, later-page limit {2}.

        Invariant: a page shorter than its `limit` must be the last
        non-empty page; if any later page still returns rows, the
        pagination is inconsistent.
        """
        finish = False

        limit = p_limit1
        offset = 0
        prev_offset = -1  # offset of the last short page seen (-1 = none yet)
        prev_limit = -1
        while not finish and offset / limit < 20:  # safety cap on page count
            url = self.search_root_url + '?limit=%d&offset=%d' % (limit, offset) + rela_url
            res = requests.get(url, headers=self.search_headers).json()
            logging.debug(url)
            if isinstance(res['message'], dict) and res['message']['list'] == []:
                finish = True
            elif isinstance(res['message'], dict):
                logging.debug("返回结果数：%d, limit: %d, offset: %s" % (len(res['message']['list']), limit, offset))
                if len(res['message']['list']) > 0 and prev_offset >= 0:
                    # A previous page returned fewer rows than its limit, yet
                    # this later page still has results.
                    self.fail("返回结果数量小于limit: %d,offset: %d" % (prev_limit, prev_offset))
                if len(res['message']['list']) < limit:
                    prev_offset = offset
                    prev_limit = limit

            offset += limit
            limit = p_limit2
            time.sleep(2)  # throttle requests to the service

    @unittest.skip("debug other tests")
    def test_no_company_tag(self):
        """Sentences whose company tag is empty must not be returned by search."""
        sql = """
        select DISTINCT a.id,a.oid,a.sentence,f.tim from nlp_knews_sentence_all a
        left outer join nlp_knews_company c
        on c.oid = a.oid
        inner join nlp_news_feed f
        on f.id = a.oid
        left outer join nlp_knews_sentence_result r
        on r.sentence_id=a.id
        where a.status=2
        and (f.ext_company is null or f.ext_company='')
        and (c.abbr is null)
        and (r.company_list='' or r.company_list is null)
        order by f.tim desc
        """

        res = execute_query(sql)
        datafrm = pd.DataFrame(list(res), columns=["id", "oid", "sentence", "tim"])

        # Randomly sample up to 10 rows to verify.
        # BUGFIX: `indexes` used to be assigned only when len(datafrm) >= 10,
        # raising NameError for smaller result sets.
        indexes = random.sample(range(len(datafrm)), min(len(datafrm), 10))

        for index in indexes:
            data_at_index = datafrm.iloc[index]
            logging.debug(data_at_index['id'])
            sentence = data_at_index['sentence']

            logging.debug(data_at_index['sentence'])
            para = {'keyword': sentence}

            url = self.search_root_url + '?' + encode_url_parameter(para)

            logging.debug(url)
            response = requests.get(url, headers=self.search_headers).json()
            logging.debug(response)
            # message.total may be non-zero; only this specific id must be absent.
            self.assertEqual(response['code'], "200", "查询异常")
            for item in response['message']['list']:
                self.assertNotEqual(item['id'], data_at_index['id'], "没有公司标签的快讯也被搜索接口返回：id: %s, oid: %s, sentence: %s, tim: %s" % (data_at_index['id'], data_at_index['oid'], data_at_index['sentence'], data_at_index['tim']))

    @unittest.skip("debug other tests")
    @ddt.data([1, 2], [1, 3], [2, 1], [2, 2], [2, 3], [3, 1], [3, 2], [3, 3])  # txt_type covers announcements and research reports
    @ddt.unpack
    def test_company_tag(self, scenario_id, txt_type):
        """Company tag (the `company` field) in query results: scenario {0}, type {1}.

        Scenario 1: feed source, sentence analysis and the company algorithm
        all have values -> the tag concatenates all three in that order.
        Scenario 2: feed source empty -> the tag concatenates sentence
        analysis then algorithm values.
        Scenario 3: only the company algorithm has a value -> the tag is
        that value alone.
        Rows are restricted to data created after the TEP-1397 deployment
        (txtTag's company field must not filter out non-listed companies).
        """
        if scenario_id == 1:  # all three sources have company values
            company_filter = """
                and not (f.ext_company is null or f.ext_company='')
                and not (r.company_list is null or r.company_list='')
                and not (c.abbr is null or c.abbr='')
                """
        elif scenario_id == 2:  # feed source empty; the other two have values
            # NOTE(review): this branch does not actually require
            # f.ext_company to be empty — confirm against the scenario spec.
            company_filter = """
                and not (r.company_list is null or r.company_list='')
                and not (c.abbr is null or c.abbr='')
                and c.crt >='2019-04-13 00:00:00'
                """
        elif scenario_id == 3:  # only the algorithm has a value
            company_filter = """
                and (f.ext_company is null or f.ext_company='')
                and (r.company_list is null or r.company_list='')
                and not (c.abbr is null or c.abbr='')
                """
        sql = """
            select a.id,a.oid,a.sentence,f.tim,ifnull(f.ext_company,''),ifnull(r.company_list,''),ifnull(group_concat(c.abbr order by c.id separator ','),'') 
            from nlp_news_feed f
            left join nlp_knews_company c on c.oid=f.id
            left join nlp_knews_sentence_all a on a.oid = f.id
            left join nlp_knews_sentence_result r on a.id=r.sentence_id
            where a.status=2
            and f.txt_type=%s
            and c.crt >='2019-04-13 00:00:00'
            %s
            group by a.id
            order by f.tim desc
            """ % (txt_type, company_filter)
        res = execute_query(sql)
        datafrm = pd.DataFrame(list(res), columns=["id", "oid", "sentence", "tim", "ext_company", "company_list", "abbr"])

        # Randomly sample up to 10 rows to verify.
        indexes = random.sample(range(len(datafrm)), min(len(datafrm), 10))

        for index in indexes:
            data_at_index = datafrm.iloc[index]
            logging.info(data_at_index['id'])
            sentence = data_at_index['sentence']
            logging.info(data_at_index['sentence'])
            para = {'keyword': sentence}

            url = self.search_root_url + '?' + encode_url_parameter(para)

            logging.info(data_at_index['company_list'])
            # Expected tag: feed-source companies, then sentence-analysis
            # companies, then algorithm abbreviations, joined in that order.
            db_company_list = [data_at_index['ext_company']]
            db_company_list.extend(data_at_index['company_list'].split(','))
            db_company_list.extend(data_at_index['abbr'].split(','))

            # De-duplicate while preserving first-occurrence order; drop empties.
            db_company_set = set(db_company_list)
            if '' in db_company_set:
                db_company_set.remove('')
            new_db_company_list = list(db_company_set)
            new_db_company_list.sort(key=db_company_list.index)
            db_company_str = ','.join(new_db_company_list)

            response = requests.get(url, headers=self.search_headers).json()
            logging.debug(response)
            # message.total may be non-zero even when the id is absent.
            self.assertEqual(response['code'], "200", "查询异常")
            for item in response['message']['list']:
                if item['id'] == data_at_index['id']:
                    logging.info(db_company_str)
                    self.assertEqual(item['company'], db_company_str, "company_tag: %s, company values in db: %s" % (item['company'], db_company_str))

    product_data = [(1, "句子分析有值"), (2, "句子分析无值")]
    #@ddt.data(1,2)
    @unittest.skip("debug other tests")
    @ddt.data(*product_data)
    @ddt.unpack
    def test_product_tag(self, scenario_id, case_desc):
        """Product tag comes from sentence analysis: {1}.

        Scenario 1: sentence analysis has a product list.
        Scenario 2: sentence analysis has no product list.
        """
        if scenario_id == 1:
            product_filter = " AND not (r.product_list is null or r.product_list = '')"

        elif scenario_id == 2:
            product_filter = " AND (r.product_list is null or r.product_list = '')"

        sql = """
            SELECT a.id,a.oid,a.sentence, ifnull(r.product_list,'')
            FROM nlp_knews_sentence_all a
            LEFT JOIN nlp_knews_sentence_result r
            ON r.sentence_id = a.id
            WHERE a.status=2
            %s
            ORDER BY a.crt desc
        """ % product_filter

        res = execute_query(sql)
        datafrm = pd.DataFrame(list(res), columns=["id", "oid", "sentence", "sentence_product"])

        # Randomly sample up to 10 rows to verify.
        indexes = random.sample(range(len(datafrm)), min(len(datafrm), 10))

        for index in indexes:
            data_at_index = datafrm.iloc[index]
            logging.info(data_at_index['id'])
            sentence = data_at_index['sentence']
            logging.info(data_at_index['sentence'])
            para = {'keyword': sentence}

            url = self.search_root_url + '?' + encode_url_parameter(para)

            # The API is expected to echo the db value verbatim (no dedup or
            # reorder): the old split(',')/join(',') round-trip was a no-op.
            db_product_str = data_at_index['sentence_product']

            response = requests.get(url, headers=self.search_headers).json()
            logging.debug(response)
            # message.total may be non-zero even when the id is absent.
            self.assertEqual(response['code'], "200", "查询异常")
            for item in response['message']['list']:
                if item['id'] == data_at_index['id']:
                    logging.info(db_product_str)
                    self.assertEqual(item['product'], db_product_str, "product_tag: %s, product values in db: %s" % (item['product'], db_product_str))

    #@unittest.skip("debug other tests")
    @ddt.data(1, 2)
    def test_industry_tag(self, scenario_id):
        """Industry tag in query results: scenario {0}.

        Scenario 1: the article carries industry names.
        Scenario 2: the article carries none.
        """
        if scenario_id == 1:
            condition = " AND not (i.name is null or i.name = '')"
        else:
            condition = " AND (i.name is null or i.name = '')"

        sql = """
            SELECT a.id,a.oid,a.sentence, ifnull(group_concat(i.name order by i.id separator ','),'')
            FROM nlp_knews_sentence_all a
            LEFT JOIN nlp_news_industry i
            ON i.oid = a.oid
            WHERE a.status=2
            %s
            GROUP BY i.oid
            ORDER BY a.crt desc
        """ % condition

        res = execute_query(sql)
        datafrm = pd.DataFrame(list(res),
                               columns=["id", "oid", "sentence", "industry"])

        # Randomly sample up to 10 rows to verify.
        indexes = random.sample(range(len(datafrm)), min(len(datafrm), 10))

        for index in indexes:
            data_at_index = datafrm.iloc[index]
            logging.info(data_at_index['id'])
            sentence = data_at_index['sentence']
            logging.info(data_at_index['sentence'])

            # Map each level-4 industry on the article to its level-2 parent.
            indTags = []
            l4_industry_str = data_at_index['industry']
            for ind in l4_industry_str.split(','):
                indTags.append(get_level2_industry(ind))

            logging.debug(indTags)

            # De-duplicate while preserving first-occurrence order; drop empties.
            industry_set = set(indTags)
            if '' in industry_set:
                industry_set.remove('')
            new_industry_list = list(industry_set)
            new_industry_list.sort(key=indTags.index)
            ind_tag_str = ','.join(new_industry_list)

            para = {'keyword': sentence}
            url = self.search_root_url + '?' + encode_url_parameter(para)

            response = requests.get(url, headers=self.search_headers).json()
            logging.debug(response)

            self.assertEqual(response['code'], "200", "查询异常")
            for item in response['message']['list']:
                if item['id'] == data_at_index['id']:
                    logging.info(ind_tag_str)
                    self.assertEqual(item['industry'], ind_tag_str, "industry_tag: %s, industry tag on article: %s" % (item['industry'], ind_tag_str))

    #@unittest.skip("debug other tests")
    def test_sentence_sequence(self):
        """Sentences split from one article keep their paragraph order.

        1. Page through the search API until three or more consecutive
           results share the same source article (oid).
        2. Run the para_weight algorithm on that article.
        3. Check the analysed sentence order matches the search order.
        """
        found = False
        limit = 100
        index = 0

        # Labels considered "analysed"; only sentences whose tags intersect
        # this set are expected to appear in the search index.
        valid_sen_tags = [item[0] for item in list(execute_query("SELECT label FROM nlp_dict_knews_label"))]
        while not found:
            url = self.search_root_url + "?limit=%d&offset=%d" % (limit, index * limit)
            # BUGFIX: advance the page before any `continue`; previously
            # `index` was never incremented, so every retry re-fetched the
            # same page and the loop could never terminate.
            index += 1
            res = requests.get(url, headers=self.search_headers).json()
            self.assertEqual(res['code'], '200')
            # NOTE(review): an empty result list would make the IN () clause
            # below a SQL syntax error — execute_query then fails the test.
            ids = [str(item['id']) for item in res['message']['list']]
            sql = "SELECT oid,id,sentence FROM nlp_knews_sentence_all where id in (%s)" % (','.join(ids))
            query_result = execute_query(sql)

            if len(query_result) > 0:
                data = pd.DataFrame(list(query_result), columns=["oid", 'id', 'sentence'])
            else:
                continue

            # Group consecutive rows by oid and count the length of each run.
            num_times = [(k, len(list(v))) for k, v in itertools.groupby(data['oid'])]

            search_oid = 0
            for item in num_times:
                if item[1] >= 3:  # three or more consecutive sentences from one article
                    search_oid = item[0]
                    break

            if search_oid == 0:  # no run of >= 3 found; fetch the next page
                continue

            search_sentences = data[data['oid'] == search_oid]['sentence']
            logging.debug(search_sentences)

            # Fetch the article body for the algorithm call.
            sql = "SELECT title, content FROM nlp_news_content where oid=%d" % int(search_oid)
            title = execute_query(sql)[0][0]
            content = execute_query(sql)[0][1]

            # BUGFIX: build the payload with json.dumps so quotes/newlines in
            # title or content cannot produce invalid JSON (the old '%s'
            # interpolation did not escape them).
            para = {'parameter': json.dumps({"title": title, "content": content}, ensure_ascii=False)}
            headers = {"Content-Type": "application/x-www-form-urlencoded;charset=utf-8"}

            para_result = requests.post(self.para_weight_url, headers=headers, data=para).json()
            logging.debug(para_result)

            self.assertEqual(para_result['stat'], 0)

            # Sentence indexes must already be in ascending order.
            idxs = [item['idx'] for item in para_result['message']]
            self.assertEqual(idxs, sorted(idxs))

            sentences = [item['sens'] for item in para_result['message']]
            sen_tags = [item['sen_tag'] for item in para_result['message']]
            # Flatten the per-paragraph lists of lists.
            sentences = sum(sentences, [])
            sen_tags = sum(sen_tags, [])

            datafrm = pd.DataFrame({"sentence": sentences, "sen_tag": sen_tags})

            # Keep only sentences carrying at least one valid label.
            analyzed_sentences = datafrm[datafrm['sen_tag'].apply(has_intersection_with_dict_knews_labels, args=(valid_sen_tags,))]['sentence']

            if len(analyzed_sentences) > 0:
                found = True

            self.assertEqual(list(analyzed_sentences), list(search_sentences))
