import cv2
from flask import Flask, request, render_template, jsonify, send_from_directory
from flask_cors import CORS
import os
import requests
import numpy as np
import json
from paddleocr import PaddleOCR, draw_ocr

# PaddleOCR model directories (Windows-local paths).
det_model_dir = r'E:\paddle_learn\ch_PP-OCRv4_det_server_infer'  # text detection model
cls_model_dir = r'E:\paddle_learn\ch_ppocr_mobile_v2.0_cls_infer'  # text orientation classifier
# NOTE: the recognition model dir is selected per source language in save_image().

# Baidu API credentials.
# SECURITY: secrets should not be hard-coded in source control; read them from
# the environment, keeping the original literals only as a fallback so existing
# deployments keep working.
API_KEY = os.environ.get("BAIDU_API_KEY", "QXxVhpxj6KbqW1VvUxCeUe3e")
SECRET_KEY = os.environ.get("BAIDU_SECRET_KEY", "MKOBPDbZeQWHgEQGaX93xxopChtJi2I0")

app = Flask(__name__, template_folder='templates')
CORS(app)  # allow cross-origin requests from the front-end

@app.route('/')
def index():
    """Render and return the landing page."""
    page = render_template('index.html')
    return page

def _serve_asset(subdir, filename):
    """Serve *filename* from *subdir*, resolved relative to this file.

    Factors out the directory lookup that every static route below used to
    repeat verbatim. send_from_directory rejects paths escaping the directory,
    and every route hard-codes its filename, so only the listed assets are
    reachable.
    """
    asset_dir = os.path.join(os.path.dirname(__file__), subdir)
    return send_from_directory(asset_dir, filename)


@app.route('/Common/Base.css')
def common_static():
    return _serve_asset('Common', 'Base.css')


@app.route('/Common/SideNavigation.css')
def side_navigation_static():
    return _serve_asset('Common', 'SideNavigation.css')


@app.route('/Index/Index.css')
def index_css_static():
    return _serve_asset('Index', 'Index.css')


@app.route('/templates/index.html')
def templates():
    # Fix: originally served relative to the process CWD ('templates'); now
    # resolved relative to this file, consistent with every other asset route.
    return _serve_asset('templates', 'index.html')


@app.route('/Index/Index.js')
def index_js_static():
    return _serve_asset('Index', 'Index.js')


@app.route('/HistoricalTran/HistoricalTran.css')
def historical_css_static():
    return _serve_asset('HistoricalTran', 'HistoricalTran.css')


@app.route('/HistoricalTran/HistoricalTran.html')
def historical_html_static():
    return _serve_asset('HistoricalTran', 'HistoricalTran.html')


@app.route('/HistoricalTran/HistoricalTran.js')
def historical_js_static():
    return _serve_asset('HistoricalTran', 'HistoricalTran.js')


@app.route('/AIDialogue/AIDialogue.css')
def ai_css_static():
    return _serve_asset('AIDialogue', 'AIDialogue.css')


@app.route('/AIDialogue/AIDialogue.html')
def ai_html_static():
    return _serve_asset('AIDialogue', 'AIDialogue.html')


@app.route('/AIDialogue/AIDialogue.js')
def ai_js_static():
    return _serve_asset('AIDialogue', 'AIDialogue.js')

@app.route('/saveText', methods=['POST'])
def translate_text():
    """Translate plain text via the Baidu machine-translation API.

    Form fields: inputText, fromLanguage, toLanguage, translateMode.
    Returns the Baidu API's JSON response verbatim on success, or a JSON
    object with an "error" key on failure.
    """
    content = request.form.get('inputText')
    fromLang = request.form.get('fromLanguage')
    toLang = request.form.get('toLanguage')
    translateMode = request.form.get('translateMode')

    # Guard clause: only plain-text mode is handled by this endpoint.
    if translateMode != 'text':
        return jsonify({"error": "不支持的翻译模式"})

    url = ("https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1?access_token="
           + get_access_token())
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    try:
        # json= serializes the payload for us; the timeout keeps a stalled
        # upstream call from hanging this request thread forever (the
        # original had no timeout and let network errors bubble up as 500s).
        response = requests.post(
            url,
            headers=headers,
            json={"from": fromLang, "to": toLang, "q": content},
            timeout=10,
        )
    except requests.RequestException:
        return jsonify({"error": "请求失败"})

    if response.ok:
        return jsonify(response.json())
    # Non-2xx upstream status.
    return jsonify({"error": "请求失败"})


# Maps the front-end language code to (PaddleOCR lang code, recognition model dir).
_OCR_LANGS = {
    'zh': ('ch', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\ch'),
    'en': ('en', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\en'),
    'kor': ('korean', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\kor'),
    'jp': ('japan', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\jp'),
    'tam': ('ta', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\tam'),
    'lat': ('latin', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\lat'),
    'ara': ('arabic', r'E:\paddle_learn\ch_PP-OCRv4_rec_server_infer\ara'),
}


@app.route('/saveImage', methods=['POST'])
def save_image():
    """OCR an uploaded image with PaddleOCR, then translate the text.

    Form fields: fromLanguage, toLanguage, translateMode; file field: imageData.
    Returns the Baidu translation API's JSON on success, or a JSON error object.
    """
    fromLang = request.form.get('fromLanguage')
    toLang = request.form.get('toLanguage')
    translateMode = request.form.get('translateMode')
    # .get() instead of ['imageData'] so a missing file yields our JSON error
    # below rather than a bare 400 from Flask.
    image_data = request.files.get('imageData')

    lang_cfg = _OCR_LANGS.get(fromLang)
    if lang_cfg is None:
        # Bug fix: an unrecognized language code previously fell through the
        # elif chain and crashed later with NameError on from_lang.
        return jsonify({"error": "不支持的源语言"})
    from_lang, rec_model_dir = lang_cfg

    if translateMode != 'image':
        # Bug fix: this case previously fell off the end of the function and
        # returned None, which Flask rejects.
        return jsonify({"error": "不支持的翻译模式"})
    if not image_data:
        return jsonify({"error": "未收到图片数据"})

    # Persist the upload so cv2/PaddleOCR can read it from disk.
    save_dir = 'E:\\pycharm\\test\\pythonProject1\\AI-translate\\image'
    os.makedirs(save_dir, exist_ok=True)
    image_path = os.path.join(save_dir, image_data.filename)
    image_data.save(image_path)

    image = cv2.imread(image_path)
    # NOTE(review): the preprocessed (deskewed/binarized) file is produced but
    # OCR still runs on the raw upload, exactly as in the original code —
    # confirm whether img_path should be fed to ocr.ocr() instead.
    img_path = image_pretreatment(image, image_data.filename)

    ocr = PaddleOCR(use_angle_cls=True, lang=from_lang,
                    det_model_dir=det_model_dir,
                    rec_model_dir=rec_model_dir,
                    cls_model_dir=cls_model_dir)
    result = ocr.ocr(image_path, cls=True)[0]
    # Each result line is (box, (text, confidence)); concatenate the texts.
    content = ''.join(line[1][0] for line in result)

    url = ("https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1?access_token="
           + get_access_token())
    try:
        # Timeout added so a stalled upstream call cannot hang the worker.
        response = requests.post(
            url,
            headers={'Content-Type': 'application/json',
                     'Accept': 'application/json'},
            json={"from": fromLang, "to": toLang, "q": content},
            timeout=10,
        )
    except requests.RequestException:
        return jsonify({"error": "请求失败"})

    if response.ok:
        return jsonify(response.json())
    return jsonify({"error": "请求失败"})


def get_access_token():
    """Generate an auth token (access_token) from the AK/SK pair.

    :return: the access_token as a string (the literal "None" if the
             response carried no token, matching the original behavior).
    :raises requests.RequestException: on network failure or timeout.
    """
    url = "https://aip.baidubce.com/oauth/2.0/token"
    params = {
        "grant_type": "client_credentials",
        "client_id": API_KEY,
        "client_secret": SECRET_KEY,
    }
    # Timeout added: the original could block indefinitely on a stalled call.
    response = requests.post(url, params=params, timeout=10)
    return str(response.json().get("access_token"))


# 图像等比例放缩
def image_resize(image, image_h=None, image_w=None):
    new_image = image.copy()
    (h, w) = new_image.shape[:2]
    if image_h is None and image_w is None:
        return new_image
    if image_h:
        ratio = image_h / float(h)
        dim = (int(ratio * w), image_h)
    if image_w:
        ratio = image_w / float(w)
        dim = (image_w, int(ratio * h))
    new_image = cv2.resize(new_image, dim, interpolation=cv2.INTER_AREA)
    print('image_resize: 图像等比例放缩')
    return new_image, ratio


# 灰度图，高斯滤波去噪声，边缘检测，轮廓检测
def image_pro(image):
    new_image = image.copy()
    new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)  # 转灰度
    new_image = cv2.GaussianBlur(new_image, (5, 5), 0)  # 高斯滤波
    new_image = cv2.Canny(new_image, 75, 200)  # 边缘检测
    print('image_pro: 灰度图，高斯滤波去噪声，边缘检测')
    return new_image


# 轮廓检测
def image_outline(image, source_image):
    new_image = image.copy()
    # cv2.RETR_LIST：以列表形式输出轮廓信息，各轮廓之间无等级关系
    # cv2.CHAIN_APPROX_SIMPLE：压缩水平方向，垂直方向，对角线方向的元素，只保留该方向的终点坐标
    # 函数返回contours：list结构，列表中每个元素代表一个边沿信息；hierarchy：返回类型是(x,4)的二维ndarray。
    screenCnt = None
    outline_list = cv2.findContours(new_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]  # 轮廓列表
    outline_list = sorted(outline_list, key=cv2.contourArea, reverse=True)[:5]  # 排序，输出前5个轮廓
    for line_list in outline_list:
        peir = cv2.arcLength(line_list, True)  # 计算闭合周长

        # cv2.approxPolyDP第二个参数，距离大于此阈值则舍弃，小于此阈值则保留，epsilon越小，折线的形状越“接近”曲线
        approx = cv2.approxPolyDP(line_list, 0.02 * peir, True)  # 多边形逼近,把一个连续光滑曲线折线化

        # 四个点的时候拿出
        if len(approx) == 4:
            screenCnt = approx
            break

    # 检查轮廓是否为空
    if screenCnt is not None:
            cv2.drawContours(source_image, [screenCnt], -1, (0, 255, 0), 2)
            print('image_outline: 轮廓检测')

    return screenCnt


# 坐标对应，按顺序找到对应坐标0123分别是 左上，右上，右下，左下
def order_points(pts):
    # 一共4个坐标点
    rect = np.zeros((4, 2), dtype="float32")

    # 计算左上，右下
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # 计算右上和左下
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect


# 透视变换
def four_point_transform(image, resize_screenCnt):
    order_points_out = order_points(resize_screenCnt)
    (top_l, top_r, bot_r, bot_l) = order_points_out

    width_top = np.sqrt((top_r[0] - top_l[0]) ** 2 + (top_r[1] - top_l[1]) ** 2)
    width_bot = np.sqrt((bot_r[0] - bot_l[0]) ** 2 + (bot_r[1] - bot_l[1]) ** 2)
    width_max = max(int(width_top), int(width_bot))

    hight_l = np.sqrt((top_l[0] - bot_l[0]) ** 2 + (top_l[1] - top_l[1]) ** 2)
    hight_r = np.sqrt((top_r[0] - bot_r[0]) ** 2 + (top_r[1] - bot_r[1]) ** 2)
    hight_max = max(int(hight_l), int(hight_r))

    # 变换后对应坐标位置
    dst = np.array([[0, 0],
                    [width_max - 1, 0],
                    [width_max - 1, hight_max - 1],
                    [0, hight_max - 1]], dtype="float32")

    # 计算变换矩阵
    matrix = cv2.getPerspectiveTransform(order_points_out, dst)  # 参数（src，sdt）src：源图像中待测矩形的四点坐标；sdt：目标图像中矩形的四点坐标
    warped = cv2.warpPerspective(image, matrix, (width_max, hight_max))  # 参数（输入图像，变换矩阵，目标图像shape）

    # 返回变换后结果
    print('four_point_transform: 透视变换')
    return warped


# 二值化,保存
def binarization(image, image_name):
    new_image = image.copy()
    new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)  # 转灰度
    _, new_image = cv2.threshold(new_image, 100, 255, cv2.THRESH_OTSU)  # 二值化
    print('binarization: 二值化')
    image_path = 'E:\\pycharm\\test\\pythonProject1\\AI-translate\\image\\out' + image_name
    cv2.imwrite(image_path, new_image)
    return image_path


# 预处理
def image_pretreatment(image, image_name):
    new_image = image.copy()
    image_resize_out, ratio = image_resize(new_image, image_h=500)  # 图像等比例放缩
    image_pro_out = image_pro(image_resize_out)  # 灰度图，高斯滤波去噪声，边缘检测，轮廓检测
    source_image = image_resize_out.copy()
    screenCnt = image_outline(image_pro_out, source_image)  # 轮廓检测
    if screenCnt is not None:
        image_four_point_transform = four_point_transform(image, screenCnt.reshape(4, 2) / ratio)  # 透视变换
        image_path = binarization(image_four_point_transform, image_name)  # 二值化，保存
    else:
        image_path = binarization(source_image, image_name)  # 二值化，保存
    return image_path


if __name__ == '__main__':
    # Flask development server (defaults to 127.0.0.1:5000); not for production.
    app.run()
