# -*-coding:utf-8 -*-
'''
@Author     : liuying
@Date       : 2021/4/5
@File       : searchdata_red.py
'''

from functools import reduce
import pandas as pd
from DataSearchRematchByRe.utils.elastic_search import ESPostRed, ESUserRed, ESStarRed
import re


class SeachDataRed(object):
    """Keyword search over xiaohongshu (RED) posts in Elasticsearch.

    Pipeline (driven by run()): build must/must_not clauses from keyword
    strings, scan post documents in a date range, enrich them with author
    and KOL (star) profile data, then keep posts where secondary keywords
    co-occur near the primary keyword and export both sets to Excel.

    NOTE(review): the class name misspells "Search"; kept as-is because
    callers import it under this name.
    """

    def __init__(self):
        # Stateless: the class only groups the pipeline steps.
        pass

    def exclude_word_match(self, nwords_string):
        """Build ES ``must_not`` clause lists from an exclude-word string.

        :param nwords_string: excluded words joined by "/" (preferred) or
            the half-width "¥"; an empty string means no exclusions.
        :return: order-preserving, de-duplicated list of clause lists, one
            per searched field (content / title / ocr.text).
        """
        not_clauses = []
        if nwords_string != "":
            # Both branches of the original built identical clause lists;
            # only the split character differed.
            sep = "/" if "/" in nwords_string else "¥"
            words = nwords_string.split(sep)
            not_clauses = [
                [{"match_phrase": {field: w}} for w in words]
                for field in ("content", "title", "ocr.text")
            ]

        # Drop duplicate clause lists while preserving first-seen order
        # (same effect as the previous reduce-based dedup).
        deduped = []
        for clause in not_clauses:
            if clause not in deduped:
                deduped.append(clause)
        return deduped

    def must_word_match(self, mwords_string):
        """Build ES ``should`` clause lists from a keyword string.

        Words joined by "/" are meant to be AND-combined downstream
        (query_data wraps each field's clause list in bool/must); words
        joined by "|" or the full-width "￥" stay OR-combined.

        :param mwords_string: keywords joined by "/", "|" or "￥".
        :return: ``(should_clauses, words)`` where ``should_clauses`` is an
            order-preserving, de-duplicated list of clause lists, one per
            searched field, and ``words`` is the split keyword list.
        """
        # The three original branches differed only in the split character.
        if '/' in mwords_string:
            words = mwords_string.split('/')
        elif '|' in mwords_string:
            words = mwords_string.split('|')
        else:
            words = mwords_string.split('￥')

        should_clauses = [
            [{"match_phrase": {field: w}} for w in words]
            for field in ("content", "title", "categories", "keywords.text")
        ]
        # cooperate_binds is a nested object, so it needs a nested query.
        should_clauses.append([
            {"nested": {"path": "cooperate_binds",
                        "query": {"match_phrase": {"cooperate_binds.name.text": w}}}}
            for w in words
        ])
        should_clauses.append([{"match_phrase": {"ocr.text": w}} for w in words])
        should_clauses.append([{"match_phrase": {"topics_name": w}} for w in words])

        # Drop duplicate clause lists while preserving first-seen order
        # (same effect as the previous reduce-based dedup).
        deduped = []
        for clause in should_clauses:
            if clause not in deduped:
                deduped.append(clause)
        return deduped, words

    def query_data(self, k, mwords_string, words, should_clause, time, not_clause):
        """Scan ESPostRed for posts matching the keyword clauses in a time window.

        :param k: label stored on every hit as ``sum_match_keyword``.
        :param mwords_string: raw keyword string; a "/" in it switches to
            AND semantics (each field's clause list wrapped in bool/must).
        :param words: split keyword list (unused here; kept so the call
            signature stays compatible).
        :param should_clause: clause lists from must_word_match().
        :param time: (start, end) pair for the post_create_time range filter.
        :param not_clause: clause lists from exclude_word_match().
        :return: list of enriched ``_source`` dicts, one per matching post.
        """
        # Fields returned for every hit (the '|' and else branches of the
        # original built byte-identical queries, so they are merged here).
        source_fields = [
            "post_id",
            "user_id",
            "content",
            "title",
            "post_create_time",
            "cooperate_binds.name",
            "like_count",
            "comment_count",
            "share_count",
            "collect_count",
            "post_type",
            "categories",
            "keywords",
            "topics_name",
            "ocr",
        ]

        if '/' in mwords_string:
            # AND semantics: within each field, every word must co-occur.
            should = [{"bool": {"must": clause}} for clause in should_clause]
        else:
            # OR semantics: any clause may match.
            # NOTE(review): should_clause is a list of lists here — confirm the
            # ES client accepts/flattens nested lists in "should"; otherwise
            # each inner list may need the same bool wrapping as above.
            should = should_clause

        query = {
            "_source": source_fields,
            "query": {
                "bool": {
                    "should": should,
                    # At least one should clause must match for a doc to be
                    # recalled.
                    "minimum_should_match": 1,
                    # The date-range filter must always hold.
                    "must": [
                        {
                            "range": {
                                "post_create_time": {
                                    "gte": time[0],
                                    "lte": time[1],
                                }
                            }
                        }
                    ],
                    # Excluded-word clauses must not match.
                    "must_not": not_clause,
                }
            },
        }

        results = []
        for hit in ESPostRed.scan(query=query):
            src = hit['_source']
            src['sum_match_keyword'] = k
            src['match_keyword'] = mwords_string
            if src.get('post_id', ''):
                src['play_url'] = ('https://www.xiaohongshu.com/discovery/item/'
                                   + src['post_id'])
            if src.get('cooperate_binds', ''):
                # Surface the first cooperate brand name for the export.
                src['name'] = src['cooperate_binds'][0]['name']
            results.append(src)
        return results

    def get_data(self, count):
        """Enrich matched posts with author and KOL data, then dedupe.

        :param count: post ``_source`` dicts from query_data().
        :return: list of flat record dicts with a fixed schema,
            de-duplicated on (sum_match_keyword, post_id).
        """
        user_ids = []
        for post in count:
            if 'user_id' in post:
                user_ids.append(post['user_id'])
        user_ids = list(set(user_ids))  # de-duplicate before batch queries

        users = self._fetch_users(user_ids)
        self._merge_user_fields(count, users)
        stars = self._fetch_stars(user_ids)
        self._merge_star_fields(count, stars)

        rows = [self._build_row(post) for post in count]
        items = pd.DataFrame(rows)
        # Keep the first record per (keyword label, post) pair.
        items = items.drop_duplicates(subset=['sum_match_keyword', 'post_id'], keep='first')
        items = items.to_dict(orient='records')
        print("items: ", len(items))
        return items

    def _fetch_users(self, user_ids):
        """Batch-query ESUserRed; return {user_id (str): user _source dict}."""
        users = {}
        size = 1000  # terms-query batch size
        for batch in range(len(user_ids) // size + 1):
            body = {
                "size": size,
                "_source": [
                    "user_id",
                    "red_id",
                    "nickname",
                    "follower_num",
                    "description",
                    "collect_count",
                    "liked_count",
                    "note_count",
                    "region",
                    "official_verified",
                    "raw_data.red_official_verify_content",
                    "sex",
                ],
                "query": {
                    "terms": {
                        "user_id": user_ids[batch * size: (batch + 1) * size]
                    }
                },
            }
            for hit in ESUserRed.search(body)['hits']['hits']:
                src = hit['_source']
                if src.get('user_id', ''):
                    src['user_id'] = str(src['user_id'])
                    src['主页链接'] = ('https://www.xiaohongshu.com/user/profile/'
                                   + src['user_id'])
                raw = src.get('raw_data')
                if raw and raw.get('red_official_verify_content'):
                    # Lift the nested verify text to the top level.
                    src['red_official_verify_content'] = raw['red_official_verify_content']
                users[src['user_id']] = src
        return users

    def _merge_user_fields(self, count, users):
        """Copy author-profile fields onto each post (Chinese keys feed the export)."""
        # (destination key on the post, source key on the post itself)
        engagement = [('点赞数', 'like_count'), ('收藏数', 'collect_count'),
                      ('分享数', 'share_count'), ('评论数', 'comment_count')]
        # (destination key on the post, source key on the user profile)
        profile = [('follower_num', 'follower_num'), ('nickname', 'nickname'),
                   ('总获赞', 'liked_count'), ('总收藏', 'collect_count'),
                   ('笔记数', 'note_count'), ('up_description', 'description'),
                   ('地域', 'region'), ('主页链接', '主页链接'),
                   ('red_id', 'red_id'), ('description', 'description'),
                   ('official_verified', 'official_verified'),
                   ('red_official_verify_content', 'red_official_verify_content')]
        for post in count:
            for dst, src in engagement:
                if src in post:
                    post[dst] = post[src]
            if post.get('user_id', ''):
                user = users.get(post['user_id'], {})
                for dst, src in profile:
                    if src in user:
                        post[dst] = user[src]

    def _fetch_stars(self, user_ids):
        """Batch-query ESStarRed (KOL/pricing data); return {user_id (str): _source}."""
        stars = {}
        size = 2000  # terms-query batch size
        for batch in range(len(user_ids) // size + 1):
            body = {
                "size": size,
                "_source": [
                    "user_id",
                    "tags",
                    "belongMcn",
                    "picturePrice",
                    "videoPrice",
                    "data_type",
                    "notes_list",
                ],
                "query": {
                    "terms": {
                        "user_id": user_ids[batch * size: (batch + 1) * size]
                    }
                },
            }
            for hit in ESStarRed.search(body)['hits']['hits']:
                src = hit['_source']
                if src.get('user_id', ''):
                    src['user_id'] = str(src['user_id'])
                stars[src['user_id']] = src
        return stars

    def _merge_star_fields(self, count, stars):
        """Copy KOL fields (tags, MCN, prices, notes) onto each post."""
        star_keys = ('tags', 'belongMcn', 'picturePrice', 'videoPrice',
                     'notes_list', 'data_type')
        for post in count:
            if post.get('user_id', ''):
                star = stars.get(post['user_id'], {})
                for key in star_keys:
                    if key in star:
                        post[key] = star[key]

    def _build_row(self, post):
        """Flatten one enriched post into the fixed export schema.

        Missing fields become '' so every DataFrame column is always present;
        key insertion order is preserved to match the previous layout.
        """
        leading = ['sum_match_keyword', 'match_keyword', 'user_id', 'red_id',
                   'description', 'official_verified', 'name', 'follower_num',
                   'nickname', '总获赞', '总收藏', '笔记数', 'up_description',
                   '地域', 'sex', '主页链接', 'post_id', 'title', 'content',
                   '点赞数', '收藏数', '分享数', '评论数']
        trailing = ['post_create_time', 'tags', 'belongMcn', 'picturePrice',
                    'videoPrice', 'post_type', 'categories', 'keywords', 'ocr',
                    'topics_name', 'red_official_verify_content', 'data_type']
        row = {key: post.get(key, '') for key in leading}
        if 'follower_num' in post:
            row['follower_num'] = int(post['follower_num'])  # normalize to int
        row['url'] = post.get('play_url', '')  # renamed for the export
        for key in trailing:
            row[key] = post.get(key, '')
        row['target'] = ''  # placeholder column for manual labeling
        return row

    def re_match(self, items, kw1_kw2_dic):
        """Keep items where a kw2 occurs within 5 chars of kw1; export to Excel.

        A post matches when, in its title/content/ocr text, some kw2 appears
        within 5 non-separator characters before or after kw1.  All items go
        to sheet1; matched items (with the first matched snippet) to sheet2.

        :param items: record dicts from get_data().
        :param kw1_kw2_dic: {kw1: [kw2, ...]}.  A bare-string value is
            treated as one keyword (previously it was iterated character
            by character).
        """
        gap = r'[^，。,.】·：；;▪+、➕\n\t\r]{0,5}?'  # up to 5 chars, no sentence breaks
        # Compile each (kw1, kw2) pattern pair once instead of per item/column.
        # Patterns are lowercased because the searched text is lowercased —
        # previously an uppercase keyword (e.g. "Lancome") could never match.
        # NOTE(review): keywords containing regex metacharacters would need
        # re.escape; left unescaped to preserve existing pattern semantics.
        patterns = []
        for k1, k2 in kw1_kw2_dic.items():
            kw2_list = [k2] if isinstance(k2, str) else k2
            for kw in kw2_list:
                patterns.append(re.compile(k1.lower() + gap + kw.lower()))
                patterns.append(re.compile(kw.lower() + gap + k1.lower()))

        new_items = []
        for item in items:
            matched_items = []
            for col in ["title", "content", "ocr"]:
                text = str(item[col]).lower()
                for pattern in patterns:
                    matched_items.extend(pattern.findall(text))
            if len(matched_items) > 0:
                item["re_matched_item"] = matched_items[0]
                new_items.append(item)

        print("new_items: ", len(new_items))

        items = pd.DataFrame(items)
        new_items = pd.DataFrame(new_items)

        base_columns = ["match_keyword", "user_id", "red_id", "follower_num", "nickname", "总获赞", "总收藏", "笔记数", "地域", "sex",
            "up_description", "主页链接", "post_id", "title", "content", "keywords", "categories", "ocr", "点赞数", "收藏数",
            "分享数", "评论数", "url", "post_create_time", "name", "post_type", "tags", "belongMcn", "picturePrice", "videoPrice",
            "target", "official_verified", "topics_name", "red_official_verify_content", "data_type", "sum_match_keyword"]

        # NOTE(review): the `options` kwarg was removed in pandas 2.x
        # (use engine_kwargs={'options': ...}); kept for the pinned version.
        with pd.ExcelWriter("red_match_test.xlsx", engine='xlsxwriter', options={'strings_to_urls': False}) as writer:
            items.to_excel(writer, sheet_name='sheet1', index=False, columns=base_columns)
            # sheet2 adds the matched snippet column.
            new_items.to_excel(writer, sheet_name="sheet2", index=False,
                               columns=base_columns + ["re_matched_item"])


    def run(self, excluded_words_string, must_words_string, start_date, end_date, kw1_kw2_dic):
        """Execute the full pipeline.

        Builds the exclusion and match clauses, scans posts in the date
        window, enriches them with author/KOL data, then applies the
        co-occurrence regex match and exports to Excel.
        """
        must_not = self.exclude_word_match(excluded_words_string)
        should, words = self.must_word_match(must_words_string)
        posts = self.query_data("xxxxx", must_words_string, words, should,
                                [start_date, end_date], must_not)
        records = self.get_data(posts)
        self.re_match(records, kw1_kw2_dic)


if __name__ == "__main__":

    excluded_words_string = ""

    must_words_string = "lancome|兰蔻"
    start_date = "2021-08-25"
    end_date = "2021-08-26"
    kw1_kw2_dic = {
        "兰蔻": "精华",
        "粉底液": "推荐",
        "面霜": "美白"
    }

    s = SeachDataRed()

    s.run(excluded_words_string, must_words_string, start_date, end_date, kw1_kw2_dic)

