import os.path

from pymongo import MongoClient
#  pip install -i https://pypi.tuna.tsinghua.edu.cn/simple streamlit
from conf import *
from bson.son import SON

# Connect to MongoDB.
# Credentials (username, password, ip, port, dbname) come from `conf` via the
# star-import above — TODO confirm conf defines exactly these names.
# client = MongoClient("mongodb://localhost:27017/")
mongo_uri = f"mongodb://{username}:{password}@{ip}:{port}/{dbname}?authSource=admin"
# Create the MongoDB client
client = MongoClient(mongo_uri)
# Get the names of all databases
# db_names = client.list_database_names()
# Database to export from (originally selected via a Streamlit dropdown)
selected_db = dbname

def new_client(url, db, coll):
    """Open a fresh MongoDB connection and return the requested collection.

    :param url: full MongoDB connection URI
    :param db: database name
    :param coll: collection name
    :return: the pymongo collection object
    """
    client = MongoClient(url)
    database = client[db]
    return database[coll]


def count_field_distribution(collection, field_name):
    """Count how often each distinct value of *field_name* occurs.

    :param collection: a pymongo collection (anything exposing ``aggregate``)
    :param field_name: name of the document field to group by
    :return: dict mapping each field value to its occurrence count,
             ordered by count descending, then value ascending
    """
    # Aggregation pipeline: group documents by the field value and count each
    # group, then sort by count (desc) and value (asc).  Plain dicts preserve
    # insertion order on Python 3.7+, so bson.SON is no longer needed for the
    # ordered $sort specification.
    pipeline = [
        {"$group": {
            "_id": f"${field_name}",   # group key is the field's value
            "count": {"$sum": 1},      # number of documents per group
        }},
        {"$sort": {"count": -1, "_id": 1}},
    ]
    return {doc["_id"]: doc["count"] for doc in collection.aggregate(pipeline)}


# Handle to the selected database and the names of its collections.
db = client[selected_db]
collection_names = db.list_collection_names()
# NOTE(review): mid-file imports kept in place to avoid reordering side effects.
# `en2zh` presumably supplies `mapping_d` (English->Chinese names) via the
# star-import — confirm against that module.
from local.processor import write_json
import pprint
import pandas as pd

from en2zh import *
from feature import platform2text_col
def get_keyword(all_search_keys, text):
    """Return the search keys that occur as substrings of *text*.

    :param all_search_keys: iterable of candidate keywords
    :param text: text to scan
    :return: list of matching keys, in the original iteration order
    """
    matched = []
    for key in all_search_keys:
        if key in text:
            matched.append(key)
    return matched

def get_data(x, col_1):
    """Look up a (possibly nested) field in a MongoDB document.

    :param x: document (dict) fetched from MongoDB
    :param col_1: dotted path such as ``'object.content'``; each segment
                  descends one level into nested dicts (hence the split)
    :return: the value at the path, or '' when any segment is missing or an
             intermediate value is not a dict

    Fixes over the previous version: removes a per-document debug print;
    an intermediate value of '' no longer restarts the lookup at the top
    level; a non-dict intermediate (e.g. a string) returns '' instead of
    raising TypeError via substring-based ``key in title`` matching.
    """
    value = x
    for key in col_1.split('.'):
        # Only descend when the current level is a dict containing the key;
        # otherwise the path does not exist in this document.
        if not isinstance(value, dict) or key not in value:
            return ''
        value = value[key]
    return value


import re

# Control characters that openpyxl refuses to write into .xlsx cells.
# Compiled once here instead of inside the per-platform loop, where the
# previous version re-imported `re`, redefined the cleaner, and recompiled
# the pattern on every iteration.
_ILLEGAL_CHARS_RE = re.compile(r'[\x00-\x1F\x7F-\x9F]')


def clean_illegal_characters(value):
    """Strip control characters from strings so the sheet can be saved; pass
    non-string values through unchanged."""
    if isinstance(value, str):
        return _ILLEGAL_CHARS_RE.sub('', value)
    return value


# Export every '*items*' collection: one Excel file per (collection, platform).
for c in collection_names:
    if 'items' in c:
        print('collection', c)
        coll_ob = db[c]
        # Number of documents per platform inside this collection.
        dist = count_field_distribution(coll_ob, 'platform')
        pprint.pprint(dist)
        # Re-connect so each collection export uses its own client/collection.
        connector = new_client(mongo_uri, selected_db, c)

        for plat in dist:
            # Chinese display names for the platform and the collection.
            plat_zh = mapping_d.get(plat, plat)
            coll_zh = mapping_d.get(c, c)
            # Skip files that were already generated in a previous run.
            if os.path.exists(f'df/{coll_zh}_{plat_zh}.xlsx'):
                continue
            pd_platrows = []
            # Dotted paths of the title/content fields; '?' marks "no such column".
            col_1 = platform2text_col[plat]['title']
            col_2 = platform2text_col[plat]['content']
            # Projection: fetch only the fields we need, plus the search key.
            fields = [e for e in (col_1, col_2) if e != '?'] + ['search_key']
            fields_d = {k: 1 for k in fields}
            for x in connector.find({'platform': plat}, fields_d):
                del x['_id']
                if plat in platform2text_col:
                    # Resolve title and content; either may be '' when the
                    # column is marked '?' or the path is missing (get_data).
                    title = get_data(x, col_1) if col_1 != '?' else ''
                    content = get_data(x, col_2) if col_2 != '?' else ''
                    # One output row: title, content, search keyword.
                    pd_platrows.append((title, content, x['search_key']))

            df = pd.DataFrame(pd_platrows, columns=['标题', '内容', '搜索词'])
            # Remove characters Excel cannot store, otherwise to_excel fails.
            df = df.applymap(clean_illegal_characters)
            df['平台'] = plat_zh
            # Persist the per-platform sheet.
            df.to_excel(f'df/{coll_zh}_{plat_zh}.xlsx')
