import datetime
import os.path

from fastapi import APIRouter, Request, BackgroundTasks
from fastapi.params import Depends
from jieba.analyse import ChineseAnalyzer
# from whoosh.qparser import QueryParser
# from src.common.dbase.mongdb import MongDb
from loguru import logger
from sqlalchemy.orm import Session
from whoosh.fields import *
from whoosh.index import create_in
from whoosh.index import exists_in
from whoosh.index import open_dir

from src.common.dbase import crud, models
from src.common.dbase.database import session, engine
from src.common.jbword.customdesign import CustomQueryParser

# Rotating file sink for this module's logs (20 MB per file, UTF-8);
# enqueue=True routes writes through a queue so logging is safe across workers.
logger.add("./log/personalgain-server_{time:YYYY_MM_DD}.log", rotation="20 MB", encoding='utf-8', enqueue=True)

# Every route in this module is mounted under the /server prefix.
sroute = APIRouter(prefix="/server")
# Create any missing tables at import time; checkfirst skips tables that already exist.
models.Base.metadata.create_all(bind=engine, checkfirst=True)

# mg = MongDb()
# topic_cotion = mg.create_conn('topic', ['status', 'title', 'tags', 'updtime'])

# Directory holding the Whoosh full-text index for topics, created on first run.
# NOTE(review): os.getcwd() makes the index location depend on the launch
# directory — confirm the service is always started from the project root.
idxpath = os.path.join(os.getcwd(), 'topic_index')
if not os.path.exists(idxpath):
    os.makedirs(idxpath)
# jieba-backed analyzer, reused at write time to segment Chinese text.
analyzer = ChineseAnalyzer()
# Index schema: every field is stored=True so search hits can return raw values.
schema = Schema(
    id=ID(stored=True),
    title=TEXT(stored=True, analyzer=ChineseAnalyzer()),
    body=TEXT(stored=True, analyzer=ChineseAnalyzer()),
    tags=TEXT(stored=True, analyzer=ChineseAnalyzer()),
    msg=TEXT(stored=True, analyzer=ChineseAnalyzer()),
    updtime=DATETIME(stored=True)
)

def get_db():
    """FastAPI dependency: yield one SQLAlchemy session per request.

    The session is always closed when the request finishes, even if the
    route handler raised.
    """
    db_session = session()
    try:
        yield db_session
    finally:
        db_session.close()


async def analysis_topic(data: list):
    """Add a batch of topic records to the Whoosh full-text index.

    Each element of *data* is either a dict or an object carrying
    ``id``/``title``/``body``/``tags``/``msg``/``updtime``.

    Bug fixed: the original called ``create_in`` for every dict element
    inside the loop, which re-creates the index and wipes all previously
    indexed documents on each iteration.  The index is now created only
    when none exists yet, and one writer/commit covers the whole batch.
    """
    if not data:
        return
    # Open the existing index, or create it exactly once on first use.
    if exists_in(idxpath):
        idx = open_dir(idxpath, readonly=False, schema=schema)
    else:
        idx = create_in(idxpath, schema=schema)
    wrt = idx.writer()
    try:
        for dt in data:
            if isinstance(dt, dict):
                wrt.add_document(id=dt.get('id'), title=dt.get('title'),
                                 tags=dt.get('tags'), body=dt.get('body'),
                                 msg=dt.get('msg'), updtime=dt.get('updtime'))
            else:
                wrt.add_document(id=dt.id, title=dt.title, tags=dt.tags,
                                 body=dt.body, msg=dt.msg, updtime=dt.updtime)
        # Single commit for the batch releases the index lock once.
        wrt.commit()
    except Exception:
        # Abort the write so the index lock is released, then surface the error.
        wrt.cancel()
        raise


async def search_topic(qkey: str):
    """Full-text search for *qkey* on the ``msg`` field of the topic index.

    Returns a list of plain dicts (the stored fields of each hit), so the
    result stays usable after the searcher is gone.

    Bug fixed: the original leaked the searcher (never closed) and returned
    a live whoosh ``Results`` object that is only valid while that searcher
    stays open.  Hits are now copied out and the searcher closed before
    returning; the caller's ``.get(...)`` access and truthiness checks work
    unchanged on the returned dicts/list.
    """
    index_dir = os.path.join(os.getcwd(), 'topic_index')
    ix = open_dir(index_dir)
    searcher = ix.searcher()
    try:
        query = CustomQueryParser('msg', schema=ix.schema).parse(qkey)
        # Materialize stored fields while the searcher is still open.
        return [dict(hit) for hit in searcher.search(query)]
    finally:
        searcher.close()


@sroute.get("/sec/topic", tags=['show'], summary="获取主题信息")
async def get_topic(request: Request, db: Session = Depends(get_db)):
    # Topic lookup endpoint.  tp=mongo selects the exact/regex database path;
    # otherwise the Whoosh full-text index is consulted when a topic keyword
    # is supplied, and a plain table scan via crud when it is not.
    params = dict(request.query_params)
    logger.info(f"查询请求的信息为：{params}")
    cond = {}
    results = []
    try:
        if params.get('tp') and params['tp'] == 'mongo':
            # Precise search: match the segmented-word column with a regex.
            keyword = params.get('topic')
            if keyword:
                cond['cutwd'] = {'$regex': keyword}
            results = await crud.sel_topic(db, cond)
        elif params.get('topic'):
            # Full-text search path: reshape each hit into the response schema.
            hits = await search_topic(params['topic'])
            if hits:
                results = [
                    {"_id": h.get('id'), "title": h.get('title'),
                     "tags": h.get('tags'), "body": h.get('body'),
                     "updtime": h.get('updtime')}
                    for h in hits
                ]
        else:
            # No keyword at all: fall back to an unfiltered database read.
            results = await crud.sel_topic(db, cond)
        return {"status": 0, "msg": "", "data": results}
    except Exception as e:
        logger.error(f"查询信息是出错了！{e}")
        return {"status": 1, "msg": f"查询信息是出错了！{e}", "data": []}


@sroute.post("/add/topic", tags=['add'], summary="添加词条")
async def add_topic(request: Request, task: BackgroundTasks, db: Session = Depends(get_db)):
    # Persist a new topic record, then index it for full-text search in a
    # background task so the request returns quickly.
    # Bug fixed: when the insert yielded a falsy uid the original fell off the
    # end and returned None (a bare null body); every path now returns the
    # {"status", "msg", "data"} envelope.
    body = await request.json()
    try:
        # Join title and body, segment the text with jieba, and store the
        # distinct tokens with the record so regex lookups can match them.
        msg = f"{body.get('title')} {body.get('body')}"
        cutwd = [body.get('title')]
        cutwd.extend(list(set(token.text for token in analyzer(msg))))
        mgbody = body.copy()
        mgbody.update({"cutwd": ",".join(cutwd)})
        logger.info(f"写入mongodb的信息为：{mgbody}")
        uid = await crud.operate_topic(db, mgbody)
        if not uid:
            return {"status": 1, "msg": "添加词条失败！", "data": []}
        # Hand the enriched record to the background indexer.
        body.update({"id": uid, "msg": msg, "updtime": datetime.datetime.now()})
        task.add_task(analysis_topic, [body])
        return {"status": 0, "msg": "", "data": [uid]}
    except Exception as e:
        logger.error(f"添加词条时，出错了！{e}")
        return {"status": 1, "msg": f"添加词条时，出错了！{e}", "data": []}
