import streamlit as st
import os
from PIL import Image, ImageChops, ExifTags
import random
import string
from importlib.resources import path
import sys
from PIL import Image, ImageFilter, ImageDraw
import operator as op
from optparse import OptionParser
import threading
from PIL import ImageTk, Image, ImageChops
import numpy as np
from sklearn.cluster import DBSCAN
import numpy as np
import cv2
import subprocess
from PIL import Image
from scipy import signal
from sklearn.cluster import KMeans
import  math
from PIL import Image, ImageDraw, ImageChops
from scipy import signal
from sklearn.cluster import KMeans
import argparse
import os
import numpy as np
import cv2
import pathlib
import sys
from os.path import join
from sklearn.cluster import DBSCAN
import numpy as np
import os
from tqdm import tqdm
from os.path import join
from utils.model import *
from configs import load_yaml
import trufor.src.trufor_test as tf
import trufor.visualize as tfv
import os





class ThreadExecutor:
    """Run a target callable on a dedicated background thread."""

    def __init__(self, target_func):
        # Callable to execute; the thread is created lazily in start().
        self.target_func = target_func
        self.thread = None

    def start(self):
        """Spawn a fresh thread that executes the target callable."""
        worker = threading.Thread(target=self.target_func)
        self.thread = worker
        worker.start()

    def join(self):
        """Block until the worker finishes; no-op if start() was never called."""
        if self.thread is not None:
            self.thread.join()

class FileNameGenerator:
    """Build a random 10-character file name with the given extension."""

    def __init__(self, file_extension):
        self.file_extension = file_extension
        # Full name = random stem + extension, fixed at construction time.
        self.file_name = self.generate_file_name() + self.file_extension

    def generate_file_name(self):
        """Return a random 10-character alphanumeric stem."""
        alphabet = string.ascii_letters + string.digits
        return ''.join(random.choice(alphabet) for _ in range(10))
    




def show_image(image_path):
    """Display an image in the Streamlit app.

    Args:
        image_path (str): Path to the image file.
    """
    # BUG FIX: use a context manager so the file handle is closed promptly
    # (the original left the handle open until garbage collection).
    with open(image_path, 'rb') as f:
        image = f.read()
    st.image(image, caption='Image', width=500)

def show_video(video_path):
    """Display a video in the Streamlit app.

    Args:
        video_path (str): Path to the video file.
    """
    # BUG FIX: close the file handle deterministically with a context manager
    # (the original opened it and never closed it).
    with open(video_path, 'rb') as video_file:
        video_bytes = video_file.read()
    st.video(video_bytes)



def file_uploader(upload_path):
    """Render a Streamlit uploader and persist the uploaded file to disk.

    Args:
        upload_path (str): Directory where the uploaded file is written.

    Returns:
        str | None: Path of the saved file, or None when nothing was uploaded.
    """
    uploaded_file = st.file_uploader("上传文件", type=["jpg", "jpeg", "png", "gif", "mp4", "avi", "mov"])
    if uploaded_file is None:
        return None
    destination = os.path.join(upload_path, uploaded_file.name)
    with open(destination, "wb") as out:
        out.write(uploaded_file.read())
    return destination


def show_videos_with_suffix(folder_path, video_suffix):
    """Display every video in a folder whose name ends with one of the suffixes.

    Args:
        folder_path (str): Directory containing the video files.
        video_suffix (list): Accepted file-name suffixes.
    """
    # str.endswith accepts a tuple of candidates, replacing the any() scan.
    suffixes = tuple(video_suffix)
    for entry in os.listdir(folder_path):
        if not entry.endswith(suffixes):
            continue
        video_file_path = os.path.join(folder_path, entry)
        print("Displaying video:", video_file_path)
        show_video(video_file_path)  # reuse the single-video helper
    
def analyze_image(image_path, output_path):
    """Write basic metadata (size, mode, Exif, IPTC) of an image to a text file.

    Args:
        image_path (str): Path of the image to inspect.
        output_path (str): Path of the report file to write.
    """
    try:
        with Image.open(image_path) as img:
            with open(output_path, "w") as file:
                # Image dimensions
                file.write(f"Dimensions: {img.size}\n")

                # Color mode
                file.write(f"Mode: {img.mode}\n")

                # Exif metadata
                exif_data = img._getexif()
                if exif_data:
                    file.write("\nExif Data:\n")
                    for tag_id, value in exif_data.items():
                        # BUG FIX: ExifTags is a module and has no .get();
                        # the tag-id -> name mapping is ExifTags.TAGS.
                        tag_name = ExifTags.TAGS.get(tag_id, f"Tag {tag_id}")
                        file.write(f"{tag_name}: {value}\n")
                else:
                    file.write("No Exif Data Found.\n")

                # IPTC metadata, if present in the info dict
                iptc_info = img.info.get('Iptc')
                if iptc_info:
                    file.write("\nIPTC Data Found:\n")
                    for key, value in iptc_info.items():
                        # Flatten list values into a comma-separated string
                        if isinstance(value, list):
                            value = ', '.join(str(v) for v in value)
                        file.write(f"{key}: {value}\n")
                else:
                    file.write("No IPTC Data Found.\n")

    except Exception as e:
        print(f"Error: {str(e)}")




def Dist(p1, p2):
    """Euclidean distance between two 2-D points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return (dx * dx + dy * dy) ** 0.5


def intersectarea(p1, p2, size):
    """Overlap area of two axis-aligned size x size squares anchored at p1, p2."""
    left = max(p1[0], p2[0])
    top = max(p1[1], p2[1])
    right = min(p1[0] + size, p2[0] + size)
    bottom = min(p1[1] + size, p2[1] + size)
    # Disjoint squares overlap nothing.
    if bottom < top or right < left:
        return 0
    return abs(right - left) * abs(bottom - top)


def Hausdorff_distance(clust1, clust2, forward, dir):
    """Average nearest-point distance between two clusters after shifting by
    offset `dir`; symmetric (max of both directions) when forward is None."""
    if forward is None:
        fwd = Hausdorff_distance(clust1, clust2, True, dir)
        bwd = Hausdorff_distance(clust1, clust2, False, dir)
        return max(fwd, bwd)
    clstart, clend = (clust1, clust2) if forward else (clust2, clust1)
    dx, dy = dir if forward else (-dir[0], -dir[1])
    total = sum(min(Dist((px + dx, py + dy), q) for q in clend) for px, py in clstart)
    return total / len(clstart)


def hassimilarcluster(ind, clusters, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint):
    """Return True if cluster `ind` has a near-identical twin among the others."""
    key_x, key_y = op.itemgetter(0), op.itemgetter(1)
    tx = min(clusters[ind], key=key_x)[0]
    ty = min(clusters[ind], key=key_y)[1]
    for i, cl in enumerate(clusters):
        if i == ind:
            continue
        cx = min(cl, key=key_x)[0]
        cy = min(cl, key=key_y)[1]
        # Offset that aligns the two clusters' top-left corners.
        offset = (cx - tx, cy - ty)
        if Hausdorff_distance(clusters[ind], cl, None, offset) <= int(rgsim):
            return True
    return False


def blockpoints(pix, coords, size):
    """Yield pixel values of the size x size block with top-left corner `coords`."""
    x0, y0 = coords
    for dx in range(size):
        for dy in range(size):
            yield pix[x0 + dx, y0 + dy]


def colortopalette(color, palette):
    """Map a gray value to the upper bound of the palette interval holding it.

    `palette` is a list of (low, high) pairs; returns None when no interval
    matches (original behavior preserved).
    """
    for low, high in palette:
        if low <= color < high:
            return high


def imagetopalette(image, palcolors):
    """Quantize a grayscale image in place onto the palette from `palcolors`."""
    assert image.mode == 'L', "Only grayscale images supported !"
    # Consecutive palette colors form the (low, high) intervals.
    intervals = list(zip(palcolors, palcolors[1:]))
    image.putdata([colortopalette(value, intervals) for value in image.getdata()])


def getparts(image, block_len, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint):
    """Collect every block_len x block_len block of the blurred, palette-reduced
    grayscale image as its pixel data followed by the block's (x, y) origin."""
    img = image if image.mode == 'L' else image.convert('L')
    w, h = img.size
    # Smooth repeatedly to discard fine detail and noise.
    for _ in range(int(imblev)):
        img = img.filter(ImageFilter.SMOOTH_MORE)
    # Reduce to a coarse gray palette (multiples of impalred).
    imagetopalette(img, [v for v in range(256) if v % int(impalred) == 0])
    pix = img.load()

    parts = []
    for x in range(w - block_len):
        for y in range(h - block_len):
            parts.append(list(blockpoints(pix, (x, y), block_len)) + [(x, y)])
    return sorted(parts)


def similarparts(imagparts, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint):
    """Return blocks whose pixel data is nearly identical to the next sorted
    block and whose contrast is high enough to be meaningful."""
    dupl = []
    datalen = len(imagparts[0]) - 1  # last element is the (x, y) origin

    for cur, nxt in zip(imagparts, imagparts[1:]):
        data = cur[:datalen]
        difs = sum(abs(a - b) for a, b in zip(data, nxt[:datalen]))
        mean = float(sum(data)) / datalen
        dev = float(sum(abs(mean - v) for v in data)) / datalen
        if mean == 0:
            mean = .000000000001  # avoid division by zero on all-black blocks
        # Keep only high-contrast blocks whose neighbor differs very little.
        if dev / mean >= float(blcoldev) and difs <= int(blsim):
            if cur not in dupl:
                dupl.append(cur)
            if nxt not in dupl:
                dupl.append(nxt)

    return dupl


def clusterparts(parts, block_len, imauto,imblev,impalred,rgsim,rgsize,blsim,blcoldev,blint):
    """Group duplicate block origins into spatial clusters, then keep only
    clusters that are big enough and have a look-alike twin elsewhere.

    Each entry of `parts` ends with the block's (x, y) origin; returns a list
    of clusters, each a list of (x, y) tuples.
    """
    # Sort by origin so spatially close blocks are processed together.
    parts = sorted(parts, key=op.itemgetter(-1))
    clusters = [[parts[0][-1]]]

    # assign all parts to clusters
    for i in range(1, len(parts)):
        x, y = parts[i][-1]

        # find every existing cluster this block overlaps enough with;
        # fc collects the indices of all matching clusters
        fc = []
        for k, cl in enumerate(clusters):
            for xc, yc in cl:
                ar = intersectarea((xc, yc), (x, y), block_len)
                # fractional overlap relative to a full block
                intrat = float(ar)/(block_len*block_len)
                if intrat > float(blint):
                    if not fc:
                        # add the block only to the first matching cluster
                        clusters[k].append((x, y))
                    fc.append(k)
                    break

        # if this is new cluster
        if not fc:
            clusters.append([(x, y)])
        else:
            # re-clustering boxes if in several clusters at once:
            # merge every bridged cluster into the first match
            while len(fc) > 1:
                clusters[fc[0]] += clusters[fc[-1]]
                del clusters[fc[-1]]
                del fc[-1]

    item = op.itemgetter
    # filter out small clusters: bounding-box diagonal must be large
    # enough relative to the block size
    clusters = [clust for clust in clusters if Dist((min(clust, key=item(0))[0], min(clust, key=item(
        1))[1]), (max(clust, key=item(0))[0], max(clust, key=item(1))[1]))/(block_len*1.4) >= float(rgsize)]

    # filter out clusters, which doesn`t have identical twin cluster
    clusters = [clust for x, clust in enumerate(
        clusters) if hassimilarcluster(x, clusters, imauto,imblev,impalred,rgsim,rgsize,blsim,blcoldev,blint)]

    return clusters


def marksimilar(image, clust, size, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint):
    """Tint every clustered block cyan on the image and, in automatic mode,
    outline each cluster's bounding box in magenta."""
    block_len = 15
    if not clust:
        return image

    draw = ImageDraw.Draw(image)
    mask = Image.new('RGB', (size, size), 'cyan')
    tinted = []
    for cl in clust:
        for x, y in cl:
            patch = image.crop((x, y, x + size, y + size)).convert('RGB')
            # 50/50 blend with cyan highlights the duplicated block.
            tinted.append((x, y, Image.blend(patch, mask, 0.5)))
    for x, y, patch in tinted:
        image.paste(patch, (x, y, x + size, y + size))
    if int(imauto):
        for cl in clust:
            xs = [cx for cx, _ in cl]
            ys = [cy for _, cy in cl]
            draw.rectangle([min(xs), min(ys), max(xs) + block_len, max(ys) + block_len],
                           outline="magenta")
    return image




def detect(path, imauto=1, imblev=8, impalred=15, rgsim=5, rgsize=1.5, blsim=200, blcoldev=0.2, blint=0.2, output=None):
    """Run copy-move forgery detection on an image and save an annotated copy.

    Args:
        path (str): Input image path.
        imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint:
            Tuning knobs forwarded to the block-matching pipeline.
        output (str | None): Output path; defaults to "<stem>_analyzed<ext>".

    Returns:
        int: Number of identical-region clusters found (0 in manual mode).
    """
    block_len = 15

    im = Image.open(path)
    lparts = getparts(im, block_len, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint)
    dparts = similarparts(lparts, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint)
    cparts = clusterparts(dparts, block_len, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint) if int(
        imauto) else [[elem[-1] for elem in dparts]]
    im = marksimilar(im, cparts, block_len, imauto, imblev, impalred, rgsim, rgsize, blsim, blcoldev, blint)

    # Display the annotated image.
    im.show()

    # BUG FIX: path.split('.')[0] breaks on paths with extra dots
    # (e.g. "./img.jpg" -> "" stem); os.path.splitext handles them correctly.
    if output is None:
        stem, ext = os.path.splitext(path)
        output = f"{stem}_analyzed{ext}"
    im.save(output)
    identical_regions = len(cparts) if int(imauto) else 0
    print('\tCopy-move output is saved in file -', output)
    return identical_regions


def cfa_artifact(img):
    """Placeholder CFA-artifact check.

    Args:
        img: Path of the input image (may be None).

    Returns:
        None in all cases (the analysis itself is not implemented here).
    """
    # No input image selected -> nothing to analyze.
    if img is None:
        return None

class SIFTForgeryDetector(object):
    """Copy-move forgery detector: SIFT keypoints clustered with DBSCAN."""

    def __init__(self, input):
        # Load the image with OpenCV (BGR layout).
        self.image = cv2.imread(input)

    def siftDetector(self):
        """Compute SIFT keypoints/descriptors on the grayscale image, cache and return them."""
        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        detector = cv2.SIFT_create()
        self.key_points, self.descriptors = detector.detectAndCompute(gray, None)
        return self.key_points, self.descriptors

    def showSiftFeatures(self):
        """Return a copy of the image with the cached keypoints drawn on it."""
        gray_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        return cv2.drawKeypoints(self.image, self.key_points, self.image.copy())

    def locateForgery(self, eps=40, min_sample=2):
        """Cluster descriptors with DBSCAN and connect matched keypoints.

        Returns:
            The annotated image, or None when no cluster is found.
        """
        clusters = DBSCAN(eps=eps, min_samples=min_sample).fit(self.descriptors)
        labels = clusters.labels_
        size = np.unique(labels).shape[0] - 1  # subtract the noise label slot
        forgery = self.image.copy()
        if size == 0 and np.unique(labels)[0] == -1:
            print('No Forgery Found!!')
            return None
        if size == 0:
            size = 1
        groups = [[] for _ in range(size)]
        for idx in range(len(self.key_points)):
            label = labels[idx]
            if label != -1:
                pt = self.key_points[idx].pt
                groups[label].append((int(pt[0]), int(pt[1])))
        for points in groups:
            if len(points) > 1:
                anchor = points[0]
                for other in points[1:]:
                    # Purple lines connect keypoints of the same cluster.
                    cv2.line(forgery, anchor, other, (128, 0, 128), 1)
        return forgery


def Do_SIFTForgeryDetector(image_path):
    """Run SIFT-based forgery detection and save the result image.

    Args:
        image_path (str): Path of the image to analyze.

    Returns:
        int: 1 when a forgery was located, 0 otherwise.
    """
    detector = SIFTForgeryDetector(image_path)

    # Compute SIFT features first; locateForgery relies on the cached descriptors.
    detector.siftDetector()

    forgery_image = detector.locateForgery()
    sift_only = detector.showSiftFeatures()
    if forgery_image is None:
        # Nothing found: save the plain keypoint visualization instead.
        cv2.imwrite("./res/forgery_result.jpg", sift_only)
        return 0
    cv2.imwrite("./res/forgery_result.jpg", forgery_image)
    return 1




def draw_blocks_with_noise(variances, original_image, output_path=None, block_size=32):
    """Render per-block noise estimates as a grayscale heat map.

    Args:
        variances: List of single-element lists, one noise estimate per block,
            in row-major order (rows of blocks across the image width).
        original_image: PIL image the blocks were taken from (defines the size).
        output_path (str | None): Save target; when None the image is shown.
        block_size (int): Side length of each square block.
    """
    imgwidth, imgheight = original_image.size
    canvas = Image.new('L', (imgwidth, imgheight))

    # BUG FIX: the original derived the row index from the image HEIGHT, but
    # the blocks are laid out row-major with one entry per COLUMN, so the
    # column count must drive both row and column. Use ceiling division to
    # match the crop loop, which includes partial edge blocks.
    cols = (imgwidth + block_size - 1) // block_size
    max_variance = max(variances)[0]
    if max_variance == 0:
        max_variance = 1  # all-zero noise map: avoid division by zero

    for i, block_variance in enumerate(variances):
        row = i // cols
        col = i % cols

        block_top = row * block_size
        block_left = col * block_size

        # Normalize the noise estimate into a 0-255 gray level.
        noise_color = int(255 * block_variance[0] / max_variance)
        noise_block = Image.new('L', (block_size, block_size), noise_color)

        canvas.paste(noise_block, (block_left, block_top))

    if output_path:
        canvas.save(output_path)
    else:
        canvas.show()
def estimate_noise(I):
    """Fast noise-sigma estimate (Immerkaer-style) for a 2-D grayscale array."""
    H, W = I.shape

    # Laplacian-difference kernel that cancels image structure but keeps noise.
    M = [[1, -2, 1], [-2, 4, -2], [1, -2, 1]]

    response = np.absolute(signal.convolve2d(I, M))
    sigma = response.sum() * math.sqrt(0.5 * math.pi) / (6 * (W-2) * (H-2))
    return sigma

def detect_noise_variance(input_path, blockSize=32):
    """Split an image into blocks, estimate per-block noise, and report whether
    the two KMeans noise clusters are inconsistent; also saves a noise map.

    Args:
        input_path (str): Path of the image to analyze.
        blockSize (int): Side length of each analysis block.
    """
    im = Image.open(input_path)
    im = im.convert('1')  # binarize before block-wise noise estimation

    imgwidth, imgheight = im.size

    # Crop the image into blockSize x blockSize tiles (row-major order).
    blocks = []
    for top in range(0, imgheight, blockSize):
        for left in range(0, imgwidth, blockSize):
            tile = im.crop((left, top, left + blockSize, top + blockSize))
            blocks.append(np.asarray(tile).astype(int))

    # One single-element vector per block, as expected by KMeans.
    variances = [[estimate_noise(tile)] for tile in blocks]

    kmeans = KMeans(n_clusters=2, random_state=0).fit(variances)
    center1, center2 = kmeans.cluster_centers_

    gap = abs(center1 - center2)
    if gap > .4:
        print('\nNoise variance inconsistency detected')
    else:
        print('\nNo noise variance inconsistency detected')
    print(gap[0])
    # Strip the surrounding brackets of the 1-element array repr.
    with open("noiser_res_num.txt", 'w') as file:
        file.write(f"{gap}"[1:-1])
    # Save the per-block noise visualization.
    output_image_path = './res/noise_res.jpg'
    draw_blocks_with_noise(variances, im, output_path=output_image_path)










# Avoid errors when directly run this script from './demo'
# Put the project root on sys.path so the local `utils` package resolves
# regardless of the directory this script is launched from.
project_root = pathlib.Path(__file__).parent.parent.as_posix()
sys.path.extend([project_root])
import utils.shared as shared
from utils.landmark_utils import detect_frames_track


def detect_track(input_path, video):
    """Decode every frame of a video and run landmark tracking on them.

    Args:
        input_path (str): Directory containing the video file.
        video (str): Video file name.

    Returns:
        np.ndarray: Raw landmark data produced by detect_frames_track.
    """
    vidcap = cv2.VideoCapture(join(input_path, video))
    fps = vidcap.get(cv2.CAP_PROP_FPS)

    # Read frames until decoding fails (end of stream).
    frames = []
    while True:
        ok, frame = vidcap.read()
        if not ok:
            break
        frames.append(frame)

    raw_data = detect_frames_track(frames, video, fps)
    vidcap.release()
    return np.array(raw_data)


def extract_landmarks(input_path='./upload/video_input', output_path='./landmarks/',visualize='store_true',visualize_path='./visualize/',log_file='landmark_logs.txt',fd='retinaface'):
    """Extract facial landmarks from every video in `input_path` into text files.

    Args:
        input_path (str): Folder of input videos.
        output_path (str): Folder receiving one "<video>.txt" per video.
        visualize: Truthy value enables visualization output.
            NOTE(review): the default 'store_true' looks like an argparse
            leftover; it is a truthy string, so visualization is ON by default
            — confirm intent before changing.
        visualize_path (str): Folder for visualization output.
        log_file (str): Log file name passed to the shared settings.
        fd (str): Face detector backend selection.
    """
    # Publish run settings through the shared module.
    shared.use_visualization = visualize
    shared.visualize_path = visualize_path
    shared.log_file = log_file
    shared.face_detector_selection = fd

    # Prepare the environment.
    assert os.path.exists(input_path), "Input path does not exist."

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    if shared.use_visualization:
        print("Settings: Visualize the extraction results.")
        if not os.path.exists(shared.visualize_path):
            os.makedirs(shared.visualize_path)
    else:
        print("Settings: NOT visualize the extraction results.")

    for video in sorted(os.listdir(input_path)):
        if video.startswith('.'):
            continue  # skip hidden files

        video_name = video.split('.')[0]
        # Skip videos whose landmarks were already extracted.
        if os.path.exists(join(output_path, video_name + ".txt")):
            print(video_name + " exist, skipped")
            continue
        print("Extract landmarks from {}.".format(video))
        raw_data = detect_track(input_path, video)

        if len(raw_data) == 0:
            print("No face detected", video)
        else:
            np.savetxt(join(output_path, video_name + ".txt"), raw_data, fmt='%1.5f')
        print("Landmarks data saved!")
    return







def get_data_for_test(path, fake, block):  # fake:manipulated=1 original=0
    """Load landmark files and slice them into fixed-size sample blocks.

    Args:
        path (str): Folder of landmark .txt files (one per video).
        fake (int): Label assigned to every sample (1=manipulated, 0=original).
        block (int): Number of consecutive frames per sample.

    Returns:
        tuple: (x, x_diff, y, video_y, sample_to_video, count_y) — raw blocks,
        their frame-to-frame differences, per-sample labels, per-video labels,
        each sample's source file, and the sample count per file.
    """
    x, x_diff, y = [], [], []
    video_y = []
    count_y = {}
    sample_to_video = []

    print("Loading data and embedding...")
    for file in tqdm(os.listdir(path)):
        vectors = np.loadtxt(join(path, file))
        video_y.append(fake)

        for start in range(0, vectors.shape[0] - block, block):
            vec = vectors[start:start + block, :]
            x.append(vec)
            # Shift by one frame, zero-pad the trailing row, then take the
            # difference trimmed to block-1 rows.
            vec_next = np.pad(vectors[start + 1:start + block, :],
                              ((0, 1), (0, 0)), 'constant', constant_values=(0, 0))
            x_diff.append((vec_next - vec)[:block - 1, :])

            y.append(fake)

            # Count how many samples each video contributes.
            count_y[file] = count_y.get(file, 0) + 1

            # Remember which video each sample came from.
            sample_to_video.append(file)
    return np.array(x), np.array(x_diff), np.array(y), np.array(video_y), np.array(sample_to_video), count_y


def predict(model, sample, device):
    """Run a forward pass on a numpy batch and return numpy predictions.

    Args:
        model: A torch module.
        sample (np.ndarray): Input batch.
        device (str): Target device ("cpu" or "cuda").

    Returns:
        np.ndarray: Model outputs moved back to the host.
    """
    model.to(device)
    model.eval()
    batch = torch.from_numpy(sample).float().to(device)
    output = model(batch)
    return output.cpu().detach().numpy()


def merge_video_prediction(mix_prediction, s2v, vc):
    """Aggregate per-sample predictions into one score per video.

    Args:
        mix_prediction: Iterable of per-sample scores.
        s2v: Video label of each sample (aligned with mix_prediction).
        vc (dict): Sample count per video label.

    Returns:
        list: Fraction of "fake" (score >= 0.5) samples per video,
        in first-seen order.
    """
    fake_counts = {}
    for score, video in zip(mix_prediction, s2v):
        vote = 1 if score >= 0.5 else 0
        fake_counts[video] = fake_counts.get(video, 0) + vote
    # dicts preserve insertion order, matching the original iteration order.
    return [fake_counts[video] / vc[video] for video in fake_counts]


def classify():

    """
    Initialization
    """

    # Optional to uncomment if some bugs occur.
    # os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Using device: {}".format(device))

    args_model = load_yaml("configs/args_model.yaml")
    args_inference = load_yaml("configs/args_inference.yaml")

    BLOCK_SIZE = args_inference["BLOCK_SIZE"]
    landmark_path = args_inference["landmark_path"]

    add_weights = args_inference["add_weights"]
    weights_name_g1 = args_inference["weights_name_g1"]
    weights_name_g2 = args_inference["weights_name_g2"]

    assert os.path.exists(landmark_path), "Landmark path does not exist. Please extract the landmarks firstly."
    test_samples, test_samples_diff, _, _, test_sv, test_vc = get_data_for_test(landmark_path, 1, BLOCK_SIZE)

    g1 = LRNet(**args_model)
    g2 = LRNet(**args_model)

    print("Loading models and predicting...")

    g1.load_state_dict(torch.load(join(add_weights, weights_name_g1), map_location=device))
    g2.load_state_dict(torch.load(join(add_weights, weights_name_g2), map_location=device))

    prediction = predict(g1, test_samples, device)
    prediction_diff = predict(g2, test_samples_diff, device)

    assert len(prediction) == len(prediction_diff)
    mix_predict = []
    for i in range(len(prediction)):
        mix = prediction[i][1] + prediction_diff[i][1]
        mix_predict.append(mix/2)

    prediction_video = merge_video_prediction(mix_predict, test_sv, test_vc)

    """
    Show the results
    """
    results = []
    print("\n\n", "#----Prediction Results----#")
    video_names = []
    for key in test_vc.keys():
        video_names.append(key)
    for i, pd in enumerate(prediction_video):
        if pd >= 0.3:
            label = "Fake"
        else:
            label = "Real"
        print("{}-Prediction label: {}; Scores:{}".format(video_names[i], label, pd))
        result = "{}-Prediction label: {}; Scores:{}".format(video_names[i], label, pd)
        results.append(result)

    print("#------------End------------#")
    return results




def ela_analysis(uploaded_image,ouput_path="./res/ela/difference_scaled.png"):
    """Error Level Analysis: re-save the image as JPEG and amplify the difference.

    Args:
        uploaded_image (str): Path of the image to analyze (may be None).
        ouput_path (str): Where the scaled difference image is written.
            (Parameter name kept as-is for caller compatibility.)
    """
    path = uploaded_image
    TEMP = 'temp.jpg'
    SCALE = 10

    # No input image selected -> nothing to do.
    if path is None:
        return

    original = Image.open(path)

    # JPEG cannot store an alpha channel; drop it before re-saving.
    if original.mode == 'RGBA':
        original = original.convert('RGB')

    # Re-save at quality 90; recompression artifacts reveal edited regions.
    original.save(TEMP, quality=90)
    temporary = Image.open(TEMP)

    # Both images must share a mode for difference() to be well-defined.
    if temporary.mode != original.mode:
        temporary = temporary.convert(original.mode)

    diff = ImageChops.difference(original, temporary)
    d = diff.load()
    WIDTH, HEIGHT = diff.size

    # Amplify the (usually tiny) per-pixel differences to make them visible.
    for x in range(WIDTH):
        for y in range(HEIGHT):
            d[x, y] = tuple(k * SCALE for k in d[x, y])

    # Save the scaled difference image.
    diff.save(ouput_path)




# Ensure the working directories for TruFor results and uploads exist
# before any analysis function writes into them.
directory_path = ['res/trufor', 'upload/images', 'out/trufor']
for dp in directory_path:
    if not os.path.exists(dp):
        os.makedirs(dp)
        
def trufor(image):
    """Run TruFor forgery detection on one image and visualize the result.

    Args:
        image (str): Path of the input image.

    Returns:
        The score produced by the TruFor visualizer.
    """
    outpath = "out/trufor/"
    image_name = os.path.basename(image)
    # Equivalent CLI: python trufor/src/trufor_test.py -i upload/images -o ./out/trufor -g -1
    tf.trufor_test(image, outpath)
    # cpu => trufor(image, output, -1); gpu => trufor(image, output)
    respath = 'res/trufor/'
    image_res = respath
    # Equivalent CLI: python trufor/visualize.py --image ... --out <npz> --result <res>
    score = tfv.show_result(image, outpath + image_name + '.npz', '', image_res)
    print(score)
    print("image result => res/trufor/")
    return score
    # BUG FIX: removed an unreachable "import cv2" that sat after this return.
import numpy as np
from PIL import Image

def extract_LSB(image_path, channels='RGBA', num_bits=1):
    """Extract and visualize the least-significant bit-plane(s) of an image.

    Args:
        image_path (str): Path of the input image.
        channels (str): Channels to visualize; a multi-character string yields
            an RGB(A) image, a single character ('R'/'G'/'B'/'A') yields a
            grayscale plane.
        num_bits (int): Number of low bits to keep per channel.

    Returns:
        tuple: (PIL image of the scaled LSB plane, std-dev of the LSB values).
    """
    img = Image.open(image_path)

    # Normalize the mode so all requested channels exist.
    if 'A' in channels and img.mode != 'RGBA':
        img = img.convert('RGBA')
    elif img.mode != 'RGB':
        img = img.convert('RGB')

    pixels = np.array(img)

    # Keep only the num_bits lowest bits of every channel.
    lsb_pixels = pixels & ((1 << num_bits) - 1)

    # Spread of the LSB values over the whole image.
    std_dev_image = np.std(lsb_pixels)

    # Stretch the tiny LSB values to the full 0-255 range for display.
    scale = 255 // (2 ** num_bits)
    if len(channels) > 1:
        mode = 'RGBA' if 'A' in channels else 'RGB'
        lsb_img = Image.fromarray((lsb_pixels * scale).astype(np.uint8), mode)
    else:
        channel_index = {'R': 0, 'G': 1, 'B': 2, 'A': 3}
        plane = lsb_pixels[:, :, channel_index[channels.upper()]]
        lsb_img = Image.fromarray((plane * scale).astype(np.uint8), 'L')

    return lsb_img, std_dev_image

def Do_ExtractLSB(imgpath):
    """Extract the RGBA LSB plane of an image and save it to ./res/lsb_result.png."""
    lsb_img, _ = extract_LSB(imgpath, channels='RGBA', num_bits=1)
    lsb_img.save("./res/lsb_result.png")











import _thread as thread
import base64
import datetime
import hashlib
import hmac
import json
from urllib.parse import urlparse
import ssl
from datetime import datetime
from time import mktime
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import websocket  # 使用websocket_client

# Accumulates the streamed answer text from the websocket callbacks.
answer = ""

# NOTE(review): credentials are hardcoded here — move them to environment
# variables or a config file before deploying this code.
appid = "d3fb4c44"    # APPID obtained from the provider console
api_secret = "OGNjNmY5N2M0NDhmZDQ4YzIxOTc2ZjUz"   # APISecret obtained from the provider console
api_key ="85311692ef36b5181251a367868fe9fe"    # APIKey obtained from the provider console
# NOTE(review): hardcoded absolute path, read at import time with an unclosed
# file handle — fails on any machine without this exact file.
imagedata = open("/home/xinan-works/res/ela/difference_scaled.png",'rb').read()




imageunderstanding_url = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"# cloud service endpoint
text =[{"role": "user", "content": str(base64.b64encode(imagedata), 'utf-8'), "content_type":"image"}]



class Ws_Param(object):
    """Holds API credentials and builds the signed websocket URL."""

    def __init__(self, APPID, APIKey, APISecret, imageunderstanding_url):
        self.APPID = APPID
        self.APIKey = APIKey
        self.APISecret = APISecret
        parsed = urlparse(imageunderstanding_url)
        self.host = parsed.netloc
        self.path = parsed.path
        self.ImageUnderstanding_url = imageunderstanding_url

    def create_url(self):
        """Return the websocket URL with HMAC-SHA256 auth query parameters."""
        # RFC1123 timestamp required by the signature scheme.
        date = format_date_time(mktime(datetime.now().timetuple()))

        # Canonical string over host, date and the request line.
        signature_origin = (
            "host: " + self.host + "\n"
            + "date: " + date + "\n"
            + "GET " + self.path + " HTTP/1.1"
        )

        # Sign with HMAC-SHA256 and base64-encode the digest.
        digest = hmac.new(self.APISecret.encode('utf-8'),
                          signature_origin.encode('utf-8'),
                          digestmod=hashlib.sha256).digest()
        signature_sha_base64 = base64.b64encode(digest).decode(encoding='utf-8')

        authorization_origin = f'api_key="{self.APIKey}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"'
        authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')

        # Auth parameters travel in the query string.
        params = {
            "authorization": authorization,
            "date": date,
            "host": self.host
        }
        return self.ImageUnderstanding_url + '?' + urlencode(params)


# Websocket error callback.
def on_error(ws, error):
    """Print any websocket error that occurs."""
    print("### error:", error)


# Websocket close callback.
def on_close(ws,one,two):
    """Print a blank separator when the websocket closes."""
    print(" ")


# Websocket open callback.
def on_open(ws):
    # Send the request from a separate thread so this callback returns quickly.
    thread.start_new_thread(run, (ws,))


def run(ws, *args):
    """Serialize the request parameters and send them over the websocket."""
    payload = json.dumps(gen_params(appid=ws.appid, question=ws.question))
    ws.send(payload)


# Websocket message callback.
def on_message(ws, message):
    """Print streamed content, accumulate it into the global `answer`,
    and close the socket on error or on the final chunk."""
    data = json.loads(message)
    code = data['header']['code']
    if code != 0:
        # Server-side error: report and stop the session.
        print(f'请求错误: {code}, {data}')
        ws.close()
        return
    choices = data["payload"]["choices"]
    status = choices["status"]
    content = choices["text"][0]["content"]
    print(content, end="")
    global answer
    answer += content
    # status == 2 marks the final chunk of the stream.
    if status == 2:
        ws.close()


def gen_params(appid, question):
    """Build the request payload for the image-understanding chat API.

    Args:
        appid (str): Application id issued by the provider.
        question: Message list to send (role/content dicts).

    Returns:
        dict: Request body with header, model parameters and payload.
    """
    return {
        "header": {
            "app_id": appid,
        },
        "parameter": {
            "chat": {
                "domain": "image",
                "temperature": 0.5,
                "top_k": 4,
                "max_tokens": 2028,
                "auditing": "default",
            },
        },
        "payload": {
            "message": {
                "text": question,
            },
        },
    }


def main(appid, api_key, api_secret, imageunderstanding_url, question):
    """Open the signed websocket session and run it until the server closes it."""
    wsParam = Ws_Param(appid, api_key, api_secret, imageunderstanding_url)
    websocket.enableTrace(False)
    app = websocket.WebSocketApp(wsParam.create_url(),
                                 on_message=on_message,
                                 on_error=on_error,
                                 on_close=on_close,
                                 on_open=on_open)
    # Stash request context on the app object for the callbacks.
    app.appid = appid
    app.question = question
    # Certificate verification disabled, matching the vendor demo.
    app.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})


def getText(role, content):
    """Append a {role, content} message to the global conversation `text`."""
    message = {"role": role, "content": content}
    text.append(message)
    return text


def getlength(text):
    """Total number of characters across all message contents."""
    return sum(len(item["content"]) for item in text)


def checklen(text):
    """Trim the oldest messages (keeping text[0] intact) until the total
    content length of the rest is at most 8000 characters."""
    while getlength(text[1:]) > 8000:
        del text[1]
    return text

