from huggingface_hub import hf_hub_download
from transformers import AutoImageProcessor, TableTransformerForObjectDetection, DetrFeatureExtractor
import torch
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# from IPython.display import display
from torchvision import transforms
import fitz
from fitz import Rect
from ultralytics.utils.plotting import plot_results

# Run on GPU when one is available; all model work below targets this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
# NOTE(review): prints an empty line — possibly intended to print `device`; confirm.
print("")

# Hard-coded input image (a PDF page rendered to PNG).
file_path = "images/table_results/0_1桃源大桥施工图4_281_page0.png"
image = Image.open(file_path).convert("RGB")
# Original pixel dimensions of the page image.
width, height = image.size


class MaxResize(object):
    """Rescale a PIL image so its longer side equals ``max_size``.

    Aspect ratio is preserved. Note that images whose longer side is
    already below ``max_size`` are scaled *up* as well.
    """

    def __init__(self, max_size=800):
        # Target length, in pixels, for the longer image edge.
        self.max_size = max_size

    def __call__(self, image):
        w, h = image.size
        factor = self.max_size / max(w, h)
        new_size = (int(round(w * factor)), int(round(h * factor)))
        return image.resize(new_size)



# Preprocessing for the table *detection* model: cap the longer side at
# 800 px, convert to tensor, normalize with ImageNet mean/std.
# NOTE(review): unused in the live code path — its consumers below are
# commented out.
detection_transform = transforms.Compose([
    MaxResize(800),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# pixel_values = detection_transform(image).unsqueeze(0)
# pixel_values = pixel_values.to(device)
# print(pixel_values.shape)
#
# local_path = "table-transformer-detection"
# image_processor = AutoImageProcessor.from_pretrained(local_path, local_files_only=True)
# model = TableTransformerForObjectDetection.from_pretrained(local_path, local_files_only=True)
# model.to(device)
#
# with torch.no_grad():
#     outputs = model(pixel_values)
#
#
def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x_min, y_min, x_max, y_max).

    Accepts any leading batch shape ``(..., 4)``. The previous version
    stacked along ``dim=1`` and therefore only handled 2-D ``(N, 4)``
    input; stacking along ``dim=-1`` (the DETR reference form) is
    identical for that case and also supports 1-D and batched input.
    """
    x_c, y_c, w, h = x.unbind(-1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    return torch.stack((x_c - half_w, y_c - half_h, x_c + half_w, y_c + half_h), dim=-1)


def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to absolute (x1, y1, x2, y2) pixels.

    ``size`` is the target image size as a ``(width, height)`` pair.
    """
    img_w, img_h = size
    corners = box_cxcywh_to_xyxy(out_bbox)
    scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
    return corners * scale


#
#
# # Update id2label to include a "no object" class
# id2label = model.config.id2label
# id2label[len(model.config.id2label)] = "no object"
#
#
def outputs_to_objects(outputs, img_size, id2label):
    """Turn raw detection outputs into a list of object dicts.

    Each entry has ``label`` (class name), ``score`` (max softmax
    probability), and ``bbox`` in absolute (x1, y1, x2, y2) pixel
    coordinates for an image of ``img_size`` = (width, height).
    Predictions mapped to the 'no object' class are dropped.
    Assumes a batch of size 1 (only index 0 is read).
    """
    probs = outputs.logits.softmax(-1).max(-1)
    labels = probs.indices.detach().cpu().numpy()[0]
    scores = probs.values.detach().cpu().numpy()[0]
    boxes = rescale_bboxes(outputs['pred_boxes'].detach().cpu()[0], img_size)

    detected = []
    for lbl, score, box in zip(labels, scores, boxes.tolist()):
        name = id2label[int(lbl)]
        if name != 'no object':
            detected.append({
                'label': name,
                'score': float(score),
                'bbox': [float(v) for v in box],
            })
    return detected


#
#
# objects = outputs_to_objects(outputs, image.size, id2label)
#
# print(objects)
#
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from matplotlib.patches import Patch
#
#
# def fig2img(fig):
#     """Convert a Matplotlib figure to a PIL Image and return it"""
#     import io
#     buf = io.BytesIO()
#     fig.savefig(buf)
#     buf.seek(0)
#     img = Image.open(buf)
#     return img
#
#
# def visualize_detected_tables(img, det_tables, out_path=None):
#     plt.imshow(img, interpolation="lanczos")
#     fig = plt.gcf()
#     fig.set_size_inches(20, 20)
#     ax = plt.gca()
#
#     for det_table in det_tables:
#         bbox = det_table['bbox']
#         if det_table['label'] == 'table':
#             facecolor = (1, 0, 0.45)
#             edgecolor = (1, 0, 0.45)
#             alpha = 0.3
#             linewidth = 2
#             hatch = '//'
#         elif det_table['label'] == 'table rotated':
#             facecolor = (0.95, 0.6, 0.1)
#             edgecolor = (0.95, 0.6, 0.1)
#             alpha = 0.3
#             linewidth = 2
#             hatch = '//'
#         else:
#             continue
#
#         rect = patches.Rectangle(bbox[:2], bbox[2] - bbox[0], bbox[3] - bbox[1], linewidth=linewidth, edgecolor='none',
#                                  facecolor=facecolor, alpha=0.1)
#         ax.add_patch(rect)
#         rect = patches.Rectangle(bbox[:2], bbox[2] - bbox[0], bbox[3] - bbox[1], linewidth=linewidth,
#                                  edgecolor=edgecolor, facecolor='none', linestyle='-', alpha=alpha)
#         ax.add_patch(rect)
#         rect = patches.Rectangle(bbox[:2], bbox[2] - bbox[0], bbox[3] - bbox[1], linewidth=0, edgecolor=edgecolor,
#                                  facecolor='none', linestyle='-', hatch=hatch, alpha=0.2)
#         ax.add_patch(rect)
#
#     plt.xticks([], [])
#     plt.yticks([], [])
#
#     legend_elements = [
#         Patch(facecolor=(1, 0, 0.45), edgecolor=(1, 0, 0.45), label='Table', hatch='//', alpha=0.3),
#         Patch(facecolor=(0.95, 0.6, 0.1), edgecolor=(0.95, 0.6, 0.1), label='Table (rotated)', hatch='//', alpha=0.3)
#     ]
#     plt.legend(handles=legend_elements, bbox_to_anchor=(0.5, -0.02), loc='upper center', borderaxespad=0, fontsize=10,
#                ncol=2)
#     plt.gcf().set_size_inches(10, 10)
#     plt.axis('off')
#
#     if out_path is not None:
#         plt.savefig(out_path, bbox_inches='tight', dpi=150)
#
#     return fig
#
#
# fig = visualize_detected_tables(image, objects)
# visualized_image = fig2img(fig)
#
#
# def iob(bbox1, bbox2):
#     """
#     Compute the intersection area over box area, for bbox1.
#     """
#     intersection = Rect(bbox1).intersect(bbox2)
#
#     bbox1_area = Rect(bbox1).get_area()
#     if bbox1_area > 0:
#         return intersection.get_area() / bbox1_area
#
#     return 0
#
#
# def objects_to_crops(img, tokens, objects, class_thresholds, padding=30):
#     table_crops = []
#
#     for obj in objects:
#         if obj['score'] < class_thresholds[obj['label']]:
#             continue
#
#         cropped_table = {}
#         bbox = obj['bbox']
#         bbox = [bbox[0] - padding, bbox[1] - padding, bbox[2] + padding, bbox[3] + padding]
#         cropped_img = img.crop(bbox)
#         table_tokens = [token for token in tokens if iob(token['bbox'], bbox) >= 0.5]
#
#         for token in table_tokens:
#             token['bbox'] = [
#                 token['bbox'][0] - bbox[0],
#                 token['bbox'][1] - bbox[1],
#                 token['bbox'][2] - bbox[0],
#                 token['bbox'][3] - bbox[1]
#             ]
#
#         if obj['label'] == 'table rotated':
#             cropped_img = cropped_img.rotate(270, expand=True)
#             for token in table_tokens:
#                 bbox = token['bbox']
#                 bbox = [cropped_img.size[0] - bbox[3] - 1,
#                         bbox[0],
#                         cropped_img.size[0] - bbox[1] - 1,
#                         bbox[2]]
#                 token['bbox'] = bbox
#         cropped_table['image'] = cropped_img
#         cropped_table['tokens'] = table_tokens
#         table_crops.append(cropped_table)
#     return table_crops
#
#
# tokens = []
#
# detection_class_thresholds = {
#     "table": 0.5,
#     "table rotated": 0.5,
#     "no object": 10
# }
#
# crop_padding = 10
#
# tables_crops = objects_to_crops(image, tokens, objects, detection_class_thresholds, padding=0)
#
# cropped_table = tables_crops[0]['image'].convert("RGB")
#
# cropped_table.save("table.jpg")
#
# inputs = image_processor(images=image, return_tensors="pt")
# # 推理
# outputs2 = model(**inputs)
#
# # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
# target_sizes = torch.tensor([image.size[::-1]])
# results = image_processor.post_process_object_detection(outputs2, threshold=0.5, target_sizes=target_sizes)[0]
#
# # 绘制标注后的图片
# fig, ax = plt.subplots(1)
# ax.imshow(image)
#
# for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
#     box = [round(i, 2) for i in box.tolist()]
#     print(
#         f"Detected {model.config.id2label[label.item()]} with confidence "
#         f"{round(score.item(), 3)} at location {box}"
#     )
#     # 绘制边界框
#     rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=2, edgecolor='r',
#                              facecolor='none')
#     ax.add_patch(rect)
#     # 添加标签和置信度
#     ax.text(box[0], box[1], f'{model.config.id2label[label.item()]}: {round(score.item(), 3)}', color='r', fontsize=10,
#             bbox=dict(facecolor='white', alpha=0.5))
#
# # 保存标注后的图片
# plt.axis('off')
# plt.savefig('annotated_image.png', bbox_inches='tight', pad_inches=0)
# plt.show()

from transformers import TableTransformerForObjectDetection

# The v1.1 checkpoints no longer require timm.
# Load model directly
from transformers import AutoImageProcessor, AutoModelForObjectDetection
from matplotlib.patches import Rectangle

# Load the structure-recognition checkpoint from a local directory and run
# one forward pass over the page image.
processor = AutoImageProcessor.from_pretrained("table-transformer-structure-recognition-v1.1-all",
                                               local_files_only=True)
structure_model = AutoModelForObjectDetection.from_pretrained("table-transformer-structure-recognition-v1.1-all",
                                                              local_files_only=True)
structure_model.to(device)

# NOTE(review): `size=image.size` passes a (width, height) tuple; recent
# transformers image processors expect an int or a dict — confirm the
# installed version accepts a tuple here.
encoding = processor(images=image, size=image.size, return_tensors="pt")
# Bug fix: inputs must live on the same device as the model, otherwise
# the forward pass fails whenever CUDA is available.
encoding = {k: v.to(device) for k, v in encoding.items()}
with torch.no_grad():
    outputs = structure_model(**encoding)
# Post-processing expects target sizes as (height, width).
target_sizes = [image.size[::-1]]
results = processor.post_process_object_detection(outputs, threshold=0.8, target_sizes=target_sizes)[0]
# Bring result tensors back to the CPU so the matplotlib code below can
# call .numpy() regardless of the inference device.
results = {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in results.items()}
print(results)
print("load table-transformer-structure-recognition-v1.1-fin")
# Collect the distinct structure labels that were detected.
unique_labels = torch.unique(results['labels'])

# One subplot per detected label class, boxes of that class drawn on the image.
num_labels = len(unique_labels)
fig, axs = plt.subplots(1, num_labels, figsize=(15, 5))

if num_labels == 1:
    # plt.subplots returns a bare Axes (not an array) when there is one plot.
    axs = [axs]

for ax, label in zip(axs, unique_labels):
    ax.imshow(image)
    for box, box_label, score in zip(results['boxes'], results['labels'], results['scores']):
        if box_label != label:
            continue
        # Bug fix: move to CPU before .numpy() so this also works when the
        # results still live on a CUDA device.
        xmin, ymin, xmax, ymax = box.cpu().numpy()
        rect = Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                         linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
        # .item() renders plain numbers instead of tensor reprs in the annotation.
        ax.text(xmin, ymin - 10, f"Label: {box_label.item()}, Score: {score.item():.2f}",
                color='r', fontsize=8)
    ax.set_title(f"Label: {label.item()}")

plt.show()
# structure_transform = transforms.Compose([
#     MaxResize(1000),
#     transforms.ToTensor(),
#     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ])
#
# pixel_values = structure_transform(image).unsqueeze(0)
# pixel_values = pixel_values.to(device)
# print(pixel_values.shape)
#
# # forward pass
# with torch.no_grad():
#     outputs = structure_model(pixel_values)
#
# # update id2label to include "no object"
# structure_id2label = structure_model.config.id2label
# structure_id2label[len(structure_id2label)] = "no object"
# cells = outputs_to_objects(outputs, image.size, structure_id2label)
#
# print(cells)
#
# from PIL import ImageDraw
#
# cropped_table_visualized = image.copy()
#
# draw = ImageDraw.Draw(cropped_table_visualized)
#
# for cell in cells:
#     draw.rectangle(cell["bbox"], outline="red")
#
# cropped_table_visualized.show()
#
#
# def get_cell_coordinates_by_row(table_data):
#     # Extract rows and columns
#     rows = [entry for entry in table_data if entry['label'] == 'table row']
#     columns = [entry for entry in table_data if entry['label'] == 'table column']
#
#     # Sort rows and columns by their Y and X coordinates, respectively
#     rows.sort(key=lambda x: x['bbox'][1])
#     columns.sort(key=lambda x: x['bbox'][0])
#     # Function to find cell coordinates
#     def find_cell_coordinates(row, column):
#         cell_bbox = [column['bbox'][0], row['bbox'][1], column['bbox'][2], row['bbox'][3]]
#         return cell_bbox
#     # Generate cell coordinates and count cells in each row
#     cell_coordinates = []
#     for row in rows:
#         row_cells = []
#         for column in columns:
#             cell_bbox = find_cell_coordinates(row, column)
#             row_cells.append({'column': column['bbox'], 'cell': cell_bbox})
#         # Sort cells in the row by X coordinate
#         row_cells.sort(key=lambda x: x['column'][0])
#         # Append row information to cell_coordinates
#         cell_coordinates.append({'row': row['bbox'], 'cells': row_cells, 'cell_count': len(row_cells)})
#     # Sort rows from top to bottom
#     cell_coordinates.sort(key=lambda x: x['row'][1])
#     return cell_coordinates
#
#
# cell_coordinates = get_cell_coordinates_by_row(cells)
# len(cell_coordinates)
# len(cell_coordinates[0]["cells"])
#
# for row in cell_coordinates:
#     print(row["cells"])
#
# import numpy as np
#
# import csv
#
# import easyocr
#
# from tqdm.auto import tqdm
#
# reader = easyocr.Reader(['ch_sim', 'en'], model_storage_directory="EasyOCR_model")  # 这只需要运行一次以将模型加载到内存中
#
#
# def apply_ocr(cell_coordinates):
#     # 逐行进行 OCR
#     data = dict()
#     max_num_columns = 0
#     for idx, row in enumerate(tqdm(cell_coordinates)):
#         row_text = []
#         for cell in row["cells"]:
#             # 从图像中裁剪出单元格
#             cell_image = np.array(image.crop(cell["cell"]))
#             # 应用 OCR
#             result = reader.readtext(np.array(cell_image))
#             if len(result) > 0:
#                 # 打印 OCR 结果
#                 text = " ".join([x[1] for x in result])
#                 row_text.append(text)
#         if len(row_text) > max_num_columns:
#             max_num_columns = len(row_text)
#         data[idx] = row_text
#     print("Max number of columns:", max_num_columns)
#     # 填补没有最大列数的行
#     # 确保所有行都有相同的列数
#     for row, row_data in data.copy().items():
#         if len(row_data) != max_num_columns:
#             row_data = row_data + ["" for _ in range(max_num_columns - len(row_data))]
#         data[row] = row_data
#     return data
#
#
# data = apply_ocr(cell_coordinates)
# for row, row_data in data.items():
#     print(row_data)
