import base64
import  datetime
import json
import uuid
from pathlib import Path
import re
from typing import Dict

from sqlalchemy import func, and_,or_
from sqlalchemy.orm import  session
from starlette import status
from starlette.requests import Request
from itertools import count

from Model.SqlDef.commonText import get_commonText_Search_By_keywords
from Model.community.common_text import Commontext, first_CommonComment, second_CommonComment
from Model.community.community import community_index_pre, user_to_community_expand
from Model.community.response.commontext import CommonText_response, CommonText_response_byuser, \
    CommonText_response_gettext, CommonText_response_first_comment, CommonText_response_second_comment
from Model.user.User import user_expand, user
from api.middleware import judge_token
from config.comunity import *
from Model.default import SessionLocal, get_db
from fastapi import APIRouter, Depends, Body
from func.file.Type import FileTypeChecker
from concurrent.futures import ThreadPoolExecutor

from func.uuid.bydatetime import get_to_second_now

# Thread pool used to run base64 image decoding/saving off the event loop.
encodespool=ThreadPoolExecutor(10)
# Router collecting every /community endpoint in this module.
community=APIRouter(prefix="/community",tags=["community"])

def get_file_typecheck():
    """Dependency factory: hand out a fresh FileTypeChecker instance."""
    checker = FileTypeChecker()
    return checker


# The front end ships images as base64, so decode them server-side.
def decode_base64(data, db=None):
    """
    Decode a base64 payload and write it to a fresh file in TEXT_IMAGES_DIR.

    Bug fixes: the old annotation `db:get_db()` invoked the dependency at
    definition time, and the output path was the directory itself (no
    filename), so every write failed or clobbered the same path.

    :param data: base64-encoded bytes (no data-URI header).
    :param db: unused, kept for backward compatibility with old callers.
    :return: the path of the written file as a string.
    """
    binary_data = base64.b64decode(data)
    # Give each payload its own uuid-based filename inside the image dir.
    output_file_path = Path(TEXT_IMAGES_DIR) / str(uuid.uuid4())
    with open(output_file_path, "wb") as out:
        out.write(binary_data)
    return str(output_file_path)

def deal_commonfile(file):
    """
    Persist an uploaded file-like object under TEXT_IMAGES_DIR.

    Bug fixes: the old `with ... as file` shadowed the parameter, so the
    code read from the freshly opened write-only handle instead of the
    upload; the target was also the directory path with no filename.

    :param file: file-like object supporting .read().
    :return: the path of the written file as a string.
    """
    output_file_path = Path(TEXT_IMAGES_DIR) / str(uuid.uuid4())
    with open(output_file_path, "wb") as out:
        out.write(file.read())
    return str(output_file_path)


def deal_replace_base64src(html_text):
    """
    Replace every inline base64 <img> src with a placeholder URL.

    Bug fix: the parameter was named `str`, shadowing the builtin.

    :param html_text: HTML fragment possibly containing data-URI images.
    :return: the HTML with each base64 src swapped for the static URL.
    """
    # Match `<img src="data:image/...;base64,...">` and keep the tag shell.
    pattern = r'(<img\s+src=")data:image/[^;]+;base64,[^"]+(">)'
    replacement = r'\1https://example.com/new-image.png\2'  # placeholder image URL
    return re.sub(pattern, replacement, html_text)



# Validate that a string is a well-formed base64 data URI.
def is_base64_file(base64_str):
    """Return True when *base64_str* is a `data:<type>;base64,<data>` URI."""
    if not isinstance(base64_str, str):
        return False
    # Accept image/video/application media types with a base64 payload.
    pattern = r"^data:(image|video|application)\/[a-zA-Z+-]+;base64,[a-zA-Z0-9+/=]+$"
    match = re.fullmatch(pattern, base64_str)
    return match is not None


# Perform the actual sequential src replacement.


def replace_src_sequential(html_string, new_src_list):
    """
    Replace each `src="..."` attribute in order with URLs from *new_src_list*.

    When the list runs out, remaining src attributes are left untouched.
    """
    replacements = iter(new_src_list)

    def swap(match):
        new_url = next(replacements, None)
        # Keep the original attribute once the replacement URLs are exhausted.
        if new_url is None:
            return match.group(0)
        return f'src="{new_url}"'

    return re.sub(r'src="([^"]+)"', swap, html_string)



def save_base64_image(base64_str: str, save_dir: str = TEXT_IMAGES_DIR):
    """
    Decode a data-URI base64 image and write it into *save_dir*.

    :param base64_str: full "data:image/<ext>;base64,<payload>" string.
    :param save_dir: target directory (created on demand).
    :return: public URL path (TEXT_IMAGES_GET + generated filename).
    :raises ValueError: when the payload cannot be parsed or decoded.
    """
    try:
        # Bug fix: parents=True so a missing parent directory cannot break mkdir.
        Path(save_dir).mkdir(parents=True, exist_ok=True)

        # "data:image/png;base64,<payload>" -> extension "png", data "<payload>"
        header, data = base64_str.split(",", 1)
        file_ext = header.split("/")[1].split(";")[0]

        # Random filename avoids collisions between concurrent uploads.
        filename = f"{uuid.uuid4()}.{file_ext}"
        filepath = Path(save_dir) / filename

        with open(filepath, "wb") as f:
            f.write(base64.b64decode(data))

        return str(TEXT_IMAGES_GET + filename)
    except Exception as e:
        # Chain the cause so callers can still see the underlying error.
        raise ValueError(f"无效的Base64图片数据: {str(e)}") from e

# Index-page hot tips (still a stub implementation).
@community.get("/index/hottips")
def get_hottips(db: session = Depends(get_db)):
    """
    Placeholder for index hot tips.

    :return: 1 (stub value); None when an unexpected error occurs.
    """
    try:
        date = datetime.datetime.now(datetime.timezone.utc).date()
        # TODO: query real hot-tip data for `date` once the feature lands.
        return 1
    except Exception as e:
        # Bug fix: the bare `except: pass` hid every failure; at least log it.
        print(e)



def _load_recommend_entry(record, date_override=None):
    """
    Build one carousel entry from a community_index_pre row.

    Reads the article body from the file path stored in `record.txt` and
    splits the '_+_'-separated image list, dropping empty segments.

    :param record: a community_index_pre row.
    :param date_override: when given, used instead of the row's own date.
    """
    with open(f"{record.txt}", 'r', encoding='utf-8', errors='replace') as f:
        txt = f.read()
    images_url = [part for part in (record.images or '').split('_+_') if part]
    return {
        'txt': txt,
        'images': images_url,
        'date': date_override if date_override is not None else record.date,
        'id': record.id,
        'title': record.title,
    }


@community.get("/index/index_recommend")
def get_index_recommend(type:int,id:int=0):
    """
    Return carousel recommendations for the community index page.
    Images are returned as URLs, never inlined.

    :param type: 0 = today's picks (falls back to the 2025-03-21 seed data),
                 1 = first three rows, anything else = single row by *id*.
    :param id: row id, used only when type is neither 0 nor 1.
    :return: list of entry dicts (type 0/1), one entry dict otherwise,
             or 0 on failure.
    """
    # Renamed from `session` to avoid shadowing the imported symbol.
    db_session = SessionLocal()
    date = datetime.datetime.now(datetime.timezone.utc).date()
    try:
        if type == 0:
            pre_data = db_session.query(community_index_pre).filter_by(date=str(date)).all()
            if not pre_data:
                # No picks for today: fall back to the seeded demo date.
                pre_data = db_session.query(community_index_pre).filter_by(date='2025-03-21').all()
            Data = [_load_recommend_entry(i, date) for i in pre_data]
            db_session.commit()
            return Data
        elif type == 1:
            pre_data = db_session.query(community_index_pre).limit(3)
            # type 1 keeps each row's own stored date.
            Data = [_load_recommend_entry(i) for i in pre_data]
            db_session.commit()
            return Data
        else:
            pre_data = db_session.query(community_index_pre).filter_by(id=id).first()
            return _load_recommend_entry(pre_data, date)
    except Exception as e:
        print(e)
        return 0
    finally:
        db_session.close()

@community.get("/index/recommend_span")
def get_index_recommend_span(db:session=Depends(get_db)):
    """
    Static recommendation keywords for the index page.

    :param db: session (only closed here; no query is issued yet).
    :return: fixed keyword list, or None if something unexpectedly fails.
    """
    try:
        return ["资源","图片","阅读","日常","技术","美图","旅游","风景"]
    except Exception:
        pass
    finally:
        db.close()




@community.post("/write/user_text",)
async def write_user_text(request: Request,db:session=Depends(get_db),user=Depends(judge_token)):
    """
    Store a new article. Images arrive as base64 form fields named "file*",
    are saved concurrently on the thread pool, and their generated URLs are
    substituted back into the article HTML in upload order.

    :param request: raw request; all fields are read from the multipart form.
    :param db: SQLAlchemy session.
    :return: 1 on success, 0 on failure.
    """
    form_data = await request.form()
    mymiddle_pool = []
    url_data = []
    # Collect every base64 "file*" field and decode/save it off-thread.
    # NOTE(review): articles are only categorized by kind; tags are not handled.
    for key, value in form_data.items():
        if key.startswith("file"):
            mymiddle_pool.append(encodespool.submit(save_base64_image, value, TEXT_IMAGES_DIR))
    try:
        textdata = Commontext()
        # Bug fix: reuse the form already read above instead of awaiting
        # request.form() a second time.
        title = form_data['title']
        text = form_data['text']
        sketch = form_data['sketch']
        author = form_data['author']
        index_image = form_data['index_image'] or None
        resource = form_data['resource']
        tips = form_data['tips']
        kind = form_data['kind']
        visibility = form_data['visibility']
        username = form_data['username']
        date = datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S')
        # Wait for every image save; order matches the <img> tags in `text`.
        for task in mymiddle_pool:
            url_data.append(task.result())
        new_text = replace_src_sequential(text, url_data)
        textdata.title = title
        textdata.date = date
        textdata.kind = TEXT_KIND_ESCAPE.get(kind, "阅读")
        textdata.sketch = sketch
        textdata.author = author
        textdata.username = username
        textdata.index_image = index_image
        textdata.resource = resource
        textdata.tips = tips
        textdata.visibility = visibility
        textdata.text = new_text
        db.add(textdata)
        db.commit()
    except Exception as e:
        print(e)
        db.rollback()
        return 0
    finally:
        db.close()
    return 1


@community.get("/get/text/kind")
async  def deal_put_kind_text(kind:str,limit:int,visibility:str,issift:int=0,db:session=Depends(get_db)):
    """
    Column listing: page through articles of one kind, or the "sift" picks.

    :param kind: front-end kind key, or "sift" for editor-selected articles.
    :param limit: 1-based page number.
    :param visibility: visibility filter (ignored for "sift").
    :param issift: sift-flag filter (ignored for "sift").
    :return: list of CommonText_response, [] on any failure.
    """
    try:
        page_offset = (limit - 1) * TEXT_REQUEST_LENGTH
        if kind == "sift":
            base = db.query(Commontext).filter_by(issift=1)
        else:
            base = db.query(Commontext).filter_by(
                kind=TEXT_KIND_ESCAPE.get(kind),
                visibility=visibility,
                issift=issift,
            )
        rows = base.offset(page_offset).limit(TEXT_REQUEST_LENGTH).all()
        return [CommonText_response.from_orm(row) for row in rows]
    except Exception:
        return []
    finally:
        db.close()
@community.get("/get/text/by_user")
async def deal_put_user_text(user_id:int,limit:int,db:session=Depends(get_db),user=Depends(judge_token)):
    """
    Page through the articles written by one user.

    :param user_id: author id.
    :param limit: 1-based page number.
    :return: list of CommonText_response_byuser, or 500 status on failure.
    """
    try:
        page_offset = (limit - 1) * TEXT_REQUEST_LENGTH
        rows = (db.query(Commontext)
                .filter_by(author=user_id)
                .offset(page_offset)
                .limit(TEXT_REQUEST_LENGTH)
                .all())
        return [CommonText_response_byuser.from_orm(row) for row in rows]
    except Exception:
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.get("/get/text/by_id")
async  def deal_put_image_by_id(id:int,userid:int,db:session=Depends(get_db)):
    """
    Fetch one article by id, plus the viewer's like/collect state for it.

    :param id: article id.
    :param userid: viewing user's id; 0 skips the per-user lookup.
    :param db: SQLAlchemy session (closed on exit).
    :return: CommonText_response_gettext on success, 500 status code on failure.
    """
    try:

        data = db.query(Commontext).filter_by(id=id).first()
        data_dict = data.__dict__.copy()
        # Default interaction state for anonymous viewers.
        user_to_this={
            'like':0,
            'collect':0,
        }
        if userid:

            user_to_thisText = db.query(user_expand).filter_by(id=userid).first()
            if not user_to_thisText:
                # First interaction of this user: create their expand row.
                user_to_thisText = user_expand(id=userid)
                db.add(user_to_thisText)

            # article_info_l_c holds {article_id: {'like':…, 'collect':…}} as JSON.
            article_info = json.loads(user_to_thisText.article_info_l_c or '{}')
            article_interactions = article_info.get(str(id), {})

            user_to_this = {
                'like': article_interactions.get('like', 0),
                'collect': article_interactions.get('collect', 0)
            }

        # Persists the expand row created above (no-op otherwise).
        db.commit()

        data_dict['user_to_text'] = user_to_this
        finaldata = CommonText_response_gettext(**data_dict)

        return finaldata
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.post("/set/text/user/like")
async def deal_like_text(article_id:str,user:int,author:int,type:int,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Set/unset an article like for *user* and update the related counters.

    :param article_id: article being liked.
    :param user: acting user's id.
    :param author: article author's id (receives the like tally).
    :param type: 1 to like, 0 to remove the like.
    :return: 200 on success, 500 on failure.
    """
    try:
        expandata = db.query(user_expand).filter_by(id=user).first()
        if expandata:
            # Merge the new state into the stored per-article JSON blob.
            pre_data = json.loads(expandata.article_info_l_c or '{}')
            pre_data.setdefault(article_id, {})['like'] = type
            expandata.article_info_l_c = json.dumps(pre_data)
        else:
            userexpand = user_expand(id=user)
            # Bug fix: record the requested state instead of a hard-coded 1.
            userexpand.article_info_l_c = json.dumps({article_id: {'like': type, 'collect': 0}})
            db.add(userexpand)
        db.commit()
        # Update the author's accumulated like count.
        author_expands = db.query(user_expand).filter_by(id=author).first()
        if not author_expands:
            # Bug fix: a freshly built row must be added or it is lost on close().
            author_expands = user_expand(id=author, getlikes=0)
            db.add(author_expands)
        author_expands.getlikes = max(0, (author_expands.getlikes or 0) + (1 if type else -1))
        # Update the article's thumb counter.
        text = db.query(Commontext).filter_by(id=article_id).first()
        text.thumbs = max(0, text.thumbs + (1 if type else -1))
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()
@community.post("/set/text/user/collect")
async def deal_collect_text(article_id:str,user:int,author:int,type:int,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Set/unset an article bookmark for *user* and update the related counters.

    :param article_id: article being collected.
    :param user: acting user's id.
    :param author: article author's id (receives the collect tally).
    :param type: 1 to collect, 0 to remove the bookmark.
    :return: 200 on success, 500 on failure.
    """
    try:
        expandata = db.query(user_expand).filter_by(id=user).first()
        if expandata:
            # Merge the new state into the stored per-article JSON blob.
            pre_data = json.loads(expandata.article_info_l_c or '{}')
            pre_data.setdefault(article_id, {})['collect'] = type
            expandata.article_info_l_c = json.dumps(pre_data)
        else:
            userexpand = user_expand(id=user)
            # Bug fix: record the requested state instead of a hard-coded 1.
            userexpand.article_info_l_c = json.dumps({article_id: {'like': 0, 'collect': type}})
            db.add(userexpand)
        db.commit()
        # Update the author's accumulated collect count.
        author_expands = db.query(user_expand).filter_by(id=author).first()
        if not author_expands:
            # Bug fix: a freshly built row must be added or it is lost on close().
            author_expands = user_expand(id=author, getcollects=0)
            db.add(author_expands)
        author_expands.getcollects = max(0, (author_expands.getcollects or 0) + (1 if type else -1))
        # Update the article's collect counter.
        text = db.query(Commontext).filter_by(id=article_id).first()
        text.collects = max(0, text.collects + (1 if type else -1))
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.post("/set/text/read")
def deal_add_text_reads(article_id:str,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Increment the read counter of one article.

    :param article_id: article whose `reads` counter is bumped.
    :return: 200 on success, 500 on failure.
    """
    try:
        article = db.query(Commontext).filter_by(id=article_id).first()
        article.reads = article.reads + 1
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.get("/get/text/comments/first")
async def deal_get_text_comments_first(userid:int,article_id:int,limit:int=1,db:session=Depends(get_db)):
    """
    Page through first-level comments of an article, newest first.

    :param userid: viewing user's id; 0 omits the per-user like state.
    :param article_id: article whose comments are listed.
    :param limit: 1-based page number.
    :return: list of CommonText_response_first_comment, 500 on failure.
    """
    try:
        comments = (db.query(first_CommonComment)
                    .filter_by(id=article_id).order_by(first_CommonComment.date.desc())
                    .offset((limit-1)*TEXT_COMMENT_LENGTH).limit(TEXT_COMMENT_LENGTH).all())
        pre_data = []
        comment_info = {}
        if comments and userid:
            userexpand = db.query(user_expand).filter_by(id=userid).first()
            if not userexpand:
                userexpand = user_expand(id=userid, user_id=userid)
                db.add(userexpand)
                # Bug fix: without a commit the new row was discarded on close().
                db.commit()
            # comments_likes_f holds {comment_uuid: {'like': 0|1}} as JSON.
            comment_info = json.loads(userexpand.comments_likes_f or '{}')
        for i in comments:
            middle = i.__dict__.copy()
            middle['author_image'] = i.user_key.head_image
            middle['f_username'] = i.user_key.username
            if userid:
                middle['user_to_comment'] = {'like': comment_info.get(i.uuid, {}).get('like', 0)}
            pre_data.append(CommonText_response_first_comment(**middle))
        return pre_data
    except Exception as e:
        print(e)
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()
@community.get("/get/text/comments/second")
async def deal_get_text_comments_second(userid:int,f_uuid:str,article_id:int,f_author:int,limit:int=1,db:session=Depends(get_db)):
    """
    Page through second-level comments under one first-level comment.

    :param userid: viewing user's id; 0 omits the per-user like state.
    :param f_uuid: uuid of the parent first-level comment.
    :param article_id: article the thread belongs to.
    :param f_author: parent comment author's id (unused in the query itself).
    :param limit: 1-based page number.
    :return: list of CommonText_response_second_comment, 500 on failure.
    """
    try:
        comments = (db.query(second_CommonComment)
                    .filter_by(id=article_id, f_uuid=f_uuid).order_by(second_CommonComment.date.desc())
                    .offset((limit-1)*TEXT_COMMENT_LENGTH).limit(TEXT_COMMENT_LENGTH).all())
        pre_data = []
        comment_info = {}
        if comments and userid:
            userexpand = db.query(user_expand).filter_by(id=userid).first()
            if not userexpand:
                userexpand = user_expand(id=userid, user_id=userid)
                db.add(userexpand)
                # Bug fix: without a commit the new row was discarded on close().
                db.commit()
            # comments_likes_s holds {comment_uuid: {'like': 0|1}} as JSON.
            comment_info = json.loads(userexpand.comments_likes_s or '{}')
        for i in comments:
            middle = i.__dict__.copy()
            middle['author_image'] = i.user_key.head_image
            middle['s_username'] = i.user_key.username
            # NOTE(review): per-row user lookup is an N+1 query — consider a join.
            middle['double_two'] = getattr(db.query(user).filter_by(id=i.double_two).first(), 'username', '0')
            if userid:
                middle['user_to_comment'] = {'like': comment_info.get(i.uuid, {}).get('like', 0)}
            pre_data.append(CommonText_response_second_comment(**middle))
        return pre_data
    except Exception as e:
        print(e)
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()
@community.post("/put/text/comments/first")
async def deal_put_text_comments_first(article_id:int,author:int,text:str,Re:Request,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Create a first-level comment and return the populated response object.

    :param article_id: article the comment belongs to.
    :param author: commenting user's id (also used as the uuid prefix).
    :param text: comment body.
    :param Re: raw request; an optional "image" form field is attached.
    :return: CommonText_response_first_comment on success, 0 on failure.
    """
    try:
        comments = first_CommonComment(id=article_id, uuid=str(author) + get_to_second_now())
        comments.date = get_to_second_now()
        comments.author = author
        comments.content = text
        comments.user_keyidf = author
        formdata = await Re.form()
        comments.image = formdata.get("image", None)
        db.add(comments)
        # Commit first so the `user_key` relationship is populated below.
        db.commit()
        db.query(Commontext).filter_by(id=article_id).update({"reply_count": Commontext.reply_count + 1})
        userexpand = db.query(user_expand).filter_by(id=author).first()
        if not userexpand:
            userexpand = user_expand(id=author, user_id=author)
            db.add(userexpand)
        middle = comments.to_dict()
        middle['author_image'] = comments.user_key.head_image
        middle['f_username'] = comments.user_key.username
        # A brand-new comment can never already be liked by the viewer.
        middle['user_to_comment'] = {'like': 0}
        db.commit()
        return CommonText_response_first_comment(**middle)
    except Exception as e:
        print(e)
        db.rollback()
        return 0
    finally:
        db.close()
@community.post("/put/text/comments/second")
async def deal_put_text_comments_second(article_id: int, f_author: int,author:int, text: str, Re: Request,f_uuid:str,
                                        double_two:int=0,db: session = Depends(get_db),userinfo=Depends(judge_token)):
    """
    Create a second-level comment, or a peer reply when *double_two* is set.

    :param article_id: article the thread belongs to.
    :param f_author: author of the parent first-level comment.
    :param author: commenting user's id.
    :param text: comment body.
    :param f_uuid: uuid of the parent first-level comment.
    :param double_two: id of the peer user being replied to (0 = direct reply).
    :return: CommonText_response_second_comment on success, 0 on failure.
    """
    try:
        comments = second_CommonComment(id=article_id, uuid=str(f_author) + get_to_second_now(), f_uuid=f_uuid)
        comments.date = get_to_second_now()
        comments.author = author
        comments.user_keyids = author
        comments.f_author = f_author
        comments.content = text
        formdata = await Re.form()
        comments.image = formdata.get("image", None)
        if double_two:
            # Peer reply: only mark the target; no first-level counter change.
            comments.double_two = double_two
        else:
            # Direct reply: bump the parent first-level comment's counter.
            db.query(first_CommonComment).filter_by(id=article_id, uuid=f_uuid).first().reply_count += 1
        # The article's total reply count grows either way.
        db.query(Commontext).filter_by(id=article_id).first().reply_count += 1
        db.add(comments)
        # Commit so the `user_key` relationship is available below.
        # (Bug fix: a redundant second db.add/db.commit pair was removed.)
        db.commit()
        middle = comments.to_dict()
        middle['author_image'] = comments.user_key.head_image
        middle['s_username'] = comments.user_key.username
        middle['user_to_comment'] = {'like': 0}
        double_two_id = getattr(comments, 'double_two', None)
        if double_two_id:
            user_obj = db.query(user).filter_by(id=double_two_id).first()
            middle['double_two'] = getattr(user_obj, 'username', '匿名')
        else:
            middle['double_two'] = '0'
        return CommonText_response_second_comment(**middle)
    except Exception as e:
        print(e)
        db.rollback()
        return 0
    finally:
        db.close()
@community.post("/set/text/comments/first/like")
def deal_comments_first_like(comment_uuid:str,user:int,type:int,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Set/unset the caller's like on a first-level comment and adjust its counter.

    :param comment_uuid: uuid of the comment being (un)liked.
    :param user: acting user's id.
    :param type: 1 to like, 0 to remove the like.
    :return: 200 on success, 500 on failure.
    """
    try:
        expandata = db.query(user_expand).filter_by(id=user).first()
        if expandata:
            # Merge the new state into the stored per-comment JSON blob.
            pre_data = json.loads(expandata.comments_likes_f or '{}')
            pre_data.setdefault(comment_uuid, {})['like'] = type
            expandata.comments_likes_f = json.dumps(pre_data)
        else:
            userexpand = user_expand(id=user)
            # Bug fix: store the requested state instead of a hard-coded 1.
            userexpand.comments_likes_f = json.dumps({comment_uuid: {'like': type}})
            db.add(userexpand)
        db.commit()
        # Keep the comment's public like counter in sync.
        middle_query = db.query(first_CommonComment).filter_by(uuid=comment_uuid).first()
        middle_query.like_count = max(0, middle_query.like_count + (1 if type else -1))
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()
@community.post("/set/text/comments/second/like")
def deal_comments_second_like(comment_uuid:str,user:int,type:int,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Set/unset the caller's like on a second-level comment and adjust its counter.

    Bug fixes: renamed from `deal_comments_first_like` (the duplicate name
    shadowed the first-level handler at module scope); the new-user branch
    now writes `comments_likes_s` instead of `comments_likes_f`; the stored
    state follows *type* instead of a hard-coded 1.

    :param comment_uuid: uuid of the comment being (un)liked.
    :param user: acting user's id.
    :param type: 1 to like, 0 to remove the like.
    :return: 200 on success, 500 on failure.
    """
    try:
        expandata = db.query(user_expand).filter_by(id=user).first()
        if expandata:
            pre_data = json.loads(expandata.comments_likes_s or '{}')
            pre_data.setdefault(comment_uuid, {})['like'] = type
            expandata.comments_likes_s = json.dumps(pre_data)
        else:
            userexpand = user_expand(id=user)
            userexpand.comments_likes_s = json.dumps({comment_uuid: {'like': type}})
            db.add(userexpand)
        db.commit()
        # Keep the comment's public like counter in sync.
        middle_query = db.query(second_CommonComment).filter_by(uuid=comment_uuid).first()
        middle_query.like_count = max(0, middle_query.like_count + (1 if type else -1))
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        print(e)
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.get("/index/get/search/history")
def deal_search_history(userid:int,db:session=Depends(get_db)):
    """
    Return the stored search-history list for a user.

    A user without an expand row gets one created and an empty list back.

    :param userid: user whose history is fetched.
    :return: list of past search terms, or None on failure.
    """
    try:
        record = db.query(user_to_community_expand).filter_by(user_id=userid).first()
        if record:
            history = json.loads(record.search_history or '[]')
        else:
            history = []
            db.add(user_to_community_expand(id=userid, user_id=userid))
        db.commit()
        return history
    except Exception:
        pass
    finally:
        db.close()

@community.get("/index/get/search/guess")
def deal_search_guess(userid:int,db:session=Depends(get_db)):
    """
    Guessed search suggestions: echoes the user's own history for now, or a
    static list for anonymous callers.

    Bug fix: renamed from `deal_search_history`, which silently shadowed the
    /index/get/search/history handler defined above at module scope.

    :param userid: user id, 0 for anonymous.
    :return: list of suggested search terms, or None on failure.
    """
    try:
        if userid:
            # TODO: apply a real recommendation heuristic; history for now.
            userdata = db.query(user_to_community_expand).filter_by(user_id=userid).first()
            if userdata:
                data = json.loads(userdata.search_history or '[]')
            else:
                data = []
                db.add(user_to_community_expand(id=userid, user_id=userid))
        else:
            data = ['阅读', "旅游", "历史"]
        db.commit()
        return data
    except Exception as e:
        pass
    finally:
        db.close()

@community.get("/get/text/by_keywords")
def deal_search_text(keywords:str,userid:int=0,limit:int=1):
    """
    Keyword search over article text via the shared SQL helper.

    :param keywords: search string.
    :param userid: optional user scope (0 = all users).
    :param limit: 1-based page number.
    :return: helper result, or None if the lookup fails.
    """
    try:
        return get_commonText_Search_By_keywords(keywords, userid, limit)
    except Exception:
        pass

@community.post("/get/text/user/baseinfo/selection")
def deal_get_user_texts_by(
        user_id: int = 0,
        limit: int = 1,
        keywords:str="",
        selection: Dict = Body(..., embed=True),
        db: session = Depends(get_db)
):
    """
    Paged, filtered listing of article base info (title/date/kind/...).

    :param user_id: restrict to one author when non-zero.
    :param limit: 1-based page number.
    :param keywords: case-insensitive title substring filter.
    :param selection: column -> value filters; the special 'date' key takes
        'week' | 'month' | 'year' and filters on the parsed article date.
    :return: dict with results, the echoed selection and paging metadata;
             400 on an unknown filter column, 500 on failure.
    """
    try:
        now = datetime.datetime.now()
        # Article dates are stored as 'YYYY.MM.DD HH:MM:SS' strings.
        date_expr = func.str_to_date(Commontext.date, '%Y.%m.%d %H:%i:%s')
        DateMaper = {
            'week': lambda q: q.filter(
                func.year(date_expr) == now.year,
                func.week(date_expr) == now.isocalendar()[1]
            ),
            'month': lambda q: q.filter(
                func.year(date_expr) == now.year,
                func.month(date_expr) == now.month
            ),
            'year': lambda q: q.filter(
                func.year(date_expr) == now.year
            )
        }
        query = db.query(
            Commontext.title,
            Commontext.date,
            Commontext.id,
            Commontext.index_image,
            Commontext.kind,
            Commontext.resource
        )
        query = query.filter(Commontext.title.ilike(f"%{keywords}%"))
        if user_id != 0:
            query = query.filter(Commontext.author == user_id)
        for field, value in selection.items():
            if not hasattr(Commontext, field):
                # Bug fix: the original `raise <int>` was a TypeError that fell
                # through to the generic handler; answer 400 directly instead.
                return status.HTTP_400_BAD_REQUEST
            if value is not None and field != "date":
                if field == 'kind':
                    value = TEXT_KIND_ESCAPE.get(value)
                query = query.filter(getattr(Commontext, field) == value)
        date_type = selection.get('date')
        if date_type in DateMaper:
            query = DateMaper[date_type](query)
        page_rows = query.offset((limit-1)*GET_MY_ARTICLE_LENGTH).limit(GET_MY_ARTICLE_LENGTH).all()
        # Bug fix: the loop variable was named `re`, shadowing the regex module.
        results = [
            {
                'title': row.title,
                'date': row.date,
                'id': row.id,
                'index_image': row.index_image,
                'kind': row.kind,
                'resource': TEXT_RESOURCE_KIND.get(row.resource)
            }
            for row in page_rows
        ]
        total_count = query.count()
        return {
            'selection': selection,
            "results": results,
            'meta': {
                "page": limit,
                'total_count': total_count,
            }
        }
    except Exception as e:
        # Bug fix: returning the exception object leaked internals; log + 500.
        print(e)
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()


@community.post("/fix/text/user/id")
def deal_delete_user_text_byid(user_id:int,article_id:int,type:int=0,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Change an article's state: 0 = deactivate, 1 = reactivate, 2 = hard delete.

    :param user_id: author id (ownership check).
    :param article_id: target article.
    :param type: action selector, see above.
    :return: 200 on success, 404 when no matching row, 500 on failure.
    """
    try:
        StatusMapper = {
            0: lambda row: setattr(row, "active", False),
            1: lambda row: setattr(row, "active", True),
            2: lambda row: db.delete(row),
        }
        # Bug fix: the original applied the action to the Query object itself
        # (a silent no-op for setattr, an error for delete); fetch the row.
        textdata = db.query(Commontext).filter_by(id=article_id, author=user_id).first()
        if textdata is None:
            return status.HTTP_404_NOT_FOUND
        StatusMapper[type](textdata)
        db.commit()
        return status.HTTP_200_OK
    except Exception as e:
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()

@community.get("/get/text/user/self/id")
def deal_get_text_user_self_id(article_id:int,db:session=Depends(get_db)):
    """
    Fetch one article row by id.

    :param article_id: target article.
    :return: the row, 404 when missing, 500 on failure.
    """
    try:
        # Bug fix: .one() raises on a missing row, so the 404 branch was
        # unreachable (missing rows became 500s); .first() returns None.
        data = db.query(Commontext).filter_by(id=article_id).first()
        if data:
            return data
        return status.HTTP_404_NOT_FOUND
    except Exception as e:
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()


def build_query_comments(model, user_id, keywords, selection, db=None):
    """
    Build (but do not execute) a filtered query over one comment table.

    Bug fixes: the original called `get_db()` directly, which yields a
    dependency *generator*, not a Session; and it closed the session in
    `finally` before the returned Query could ever be executed. The caller
    now owns the session lifecycle: pass one in via *db* (backward
    compatible — omitted, a new SessionLocal() is opened).

    :param model: first_CommonComment or second_CommonComment.
    :param user_id: restrict to one author when non-zero.
    :param keywords: case-insensitive content substring filter.
    :param selection: column -> value filters ('date' and None are skipped).
    :param db: optional SQLAlchemy session to bind the query to.
    :return: an unexecuted Query bound to a live session.
    """
    session_ = db if db is not None else SessionLocal()
    query = session_.query(model).filter(model.content.ilike(f"%{keywords}%"))
    if user_id != 0:
        query = query.filter(model.author == user_id)
    for field, value in selection.items():
        if hasattr(model, field) and value is not None and field != "date":
            query = query.filter(getattr(model, field) == value)
    return query


@community.post("/get/user/comments/selections")
def deal_get_comments_selections(
        user_id: int,
        keywords: str = '',
        selection: Dict = Body(..., embed=True),
        limit: int = 1,
        db: session = Depends(get_db)
):
    """
    Paged search over a user's first- and second-level comments.

    :param user_id: comment author.
    :param keywords: content substring filter.
    :param selection: column filters; 'isfloor' = 'Landlord' keeps only
        first-level results, 'replay' only second-level ones.
    :param limit: 1-based page number.
    :return: dict with results, echoed selection and paging metadata;
             400 on failure.
    """
    try:
        fields = ['uuid', 'date', 'id', 'image', 'content']
        page_offset = (limit - 1) * GET_MY_COMMENT_LENGTH

        def page(query, kind_label):
            rows = query.offset(page_offset).limit(GET_MY_COMMENT_LENGTH).all()
            return [dict({k: getattr(row, k) for k in fields}, kind=kind_label) for row in rows]

        query1 = build_query_comments(first_CommonComment, user_id, keywords, selection)
        query2 = build_query_comments(second_CommonComment, user_id, keywords, selection)
        results1 = page(query1, '楼主')
        results2 = page(query2, '回复')
        floor = selection.get('isfloor')
        if floor == 'Landlord':
            results = results1
        elif floor == 'replay':
            results = results2
        else:
            results = results1 + results2
        return {
            'selection': selection,
            "results": results,
            'meta': {
                "page": limit,
                'total_count': query1.count() + query2.count(),
            }
        }
    except Exception as e:
        print(e)
        return status.HTTP_400_BAD_REQUEST
    finally:
        db.close()
@community.post("/delete/user/comments/id")
def deal_Delete_comments_id(user_id:int,comment_id:str,db:session=Depends(get_db),userinfo=Depends(judge_token)):
    """
    Delete one of the user's comments by uuid (first- or second-level).

    Bug fix: the original issued a cross-join SELECT over both comment
    tables and discarded the result, so nothing was ever deleted and no
    commit was issued. Each table is now checked separately, matching rows
    owned by the user are deleted, and the change is committed.

    :param user_id: comment owner (ownership check).
    :param comment_id: uuid of the comment to remove.
    :return: 200 when something was deleted, 404 otherwise, 500 on failure.
    """
    try:
        deleted = False
        for model in (first_CommonComment, second_CommonComment):
            target = db.query(model).filter(
                and_(model.author == user_id, model.uuid == comment_id)
            ).first()
            if target is not None:
                db.delete(target)
                deleted = True
        if deleted:
            db.commit()
            return status.HTTP_200_OK
        return status.HTTP_404_NOT_FOUND
    except Exception as e:
        db.rollback()
        return status.HTTP_500_INTERNAL_SERVER_ERROR
    finally:
        db.close()






