import geopandas as gpd
from openpyxl import Workbook, load_workbook
from tqdm import tqdm
import fiona
import pandas as pd
import requests
from shapely.geometry import Polygon,  MultiLineString,Point
from pyproj import CRS
from shapely.geometry import MultiPolygon,Polygon, MultiLineString, LineString
import matplotlib.pyplot as plt
from shapely.ops import unary_union
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import queue
import threading
# Thread-safe queue shared by the worker threads spawned in
# get_intersecting_shapefile(); process_location_chunk() pushes one result
# dict per intersecting footprint, and the main thread drains it per feature.
output_queue = queue.Queue()

def shp2xlsData(path, bdf_path, output_path="PSR_84_New.xlsx"):
    """Export a shapefile's attribute table to an .xlsx workbook.

    For every feature, writes its attribute values followed by the four
    corners of the geometry's axis-aligned bounding box; the raw geometry
    column itself is dropped from the output row.

    Args:
        path: Path to the input .shp file.
        bdf_path: Path to the companion .dbf file (currently unused; kept
            for interface compatibility).
        output_path: Destination .xlsx file (default matches the original
            hard-coded name).
    """
    wb = Workbook()
    wv = wb.active

    # Header row: attribute columns, then the bounding-box corners in
    # UL, UR, LR, LL order.
    wv.append(['gid', 'area_km2', 'name', 'lon', 'lat', "Upper Left Longitude",
               "Upper Left Latitude", "Upper Right Longitude", "Upper Right Latitude", "Lower Right Longitude",
               "Lower Right Latitude",
               "Lower Left Longitude", "Lower Left Latitude"
               ])

    shapefile = gpd.read_file(path)
    shapefile.info()

    selected_columns = shapefile.columns.tolist()
    print(selected_columns)

    # Bug fix: locate the geometry column by name instead of assuming it
    # sits at index 5 — the hard-coded index breaks if the schema changes.
    geom_idx = selected_columns.index(shapefile.geometry.name)

    for _, row in tqdm(shapefile.iterrows(), total=len(shapefile), desc="写入Excel"):
        row_data = [row[col] for col in selected_columns]

        # Axis-aligned bounding box of this feature's geometry.
        minx, miny, maxx, maxy = row_data[geom_idx].bounds

        # Bug fix: corners are emitted in the same order the header row
        # declares (UL, UR, LR, LL); the original appended LL, LR, UR, UL,
        # so every corner landed under the wrong column.
        corners = [
            (minx, maxy),  # upper-left
            (maxx, maxy),  # upper-right
            (maxx, miny),  # lower-right
            (minx, miny),  # lower-left
        ]
        for x, y in corners:
            row_data.append(x)
            row_data.append(y)

        # Drop the geometry object itself — an Excel cell can't hold it.
        data = [value for index, value in enumerate(row_data) if index != geom_idx]
        wv.append(data)

    wb.save(output_path)
    print("已经将shp数据转换为xlsx")


def getShadowId(shadow_id_path):
    """Read the shadow-image ID spreadsheet and return its data rows.

    The first (header) row is skipped; every remaining row becomes a list
    of its cell values.

    Args:
        shadow_id_path: Path to the .xlsx file holding the shadow IDs.

    Returns:
        list[list]: One list of cell values per data row.
    """
    wb = load_workbook(shadow_id_path, read_only=True)
    try:
        ws = wb.active
        # min_row=2 skips the header; values_only yields plain cell values
        # (equivalent to reading cell.value for each cell).
        shadow_id_array = [list(row) for row in ws.iter_rows(min_row=2, values_only=True)]
    finally:
        # Bug fix: read-only workbooks keep the underlying file handle open
        # until explicitly closed.
        wb.close()
    return shadow_id_array


def PutShadowID(shadow_id_array, xlsx_path, Numbers):
    """Crawl the ShadowCam page for each image ID and save the image's
    center/corner coordinates to ``getURL_Location_Data.xlsx``.

    Args:
        shadow_id_array: Rows from getShadowId(); column 0 is the image ID.
        xlsx_path: Unused; kept for interface compatibility.
        Numbers: (start, end) inclusive index range of rows to crawl.
    """
    # Each image's detail page lives at base_url + <image ID>.
    base_url = "https://data.im-ldi.com/mds/shadowcam_published/"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Connection": "keep-alive",
    }

    wb = Workbook()
    wv = wb.active

    # Header row: image ID followed by center and corner coordinates.
    wv.append(["影像编号",
               "Center Latitude",
               "Center Longitude",
               "Lower Left Latitude",
               "Lower Left Longitude",
               "Lower Right Latitude",
               "Lower Right Longitude",
               "Upper Left Latitude",
               "Upper Left Longitude",
               "Upper Right Latitude",
               "Upper Right Longitude"
               ])

    for idx, shadow in enumerate(tqdm(shadow_id_array, total=(Numbers[1] - Numbers[0] + 1), desc="爬取影像进度：")):

        # Only crawl rows inside the requested inclusive range.
        if idx < Numbers[0] or idx > Numbers[1]:
            continue
        url = base_url + shadow[0]

        try:
            # Timeout added so one dead connection can't hang the crawl.
            response = requests.get(url, headers=headers, timeout=30)
            if response.status_code != 200:
                print("无法访问！")
                # Persist what was collected so far before bailing out.
                wb.save("getURL_Location_Data.xlsx")
                return

            # Parse the HTML detail page.
            soup = BeautifulSoup(response.text, 'html.parser')
            count = 0
            localtion_temp = [shadow[0]]

            for tbody in soup.find_all('tbody'):
                # The 8th <tbody> (index 7) holds the coordinate table.
                if count == 7:
                    td_data = [td.get_text(strip=True) for td in tbody.find_all('td')]
                    print(td_data)

                    # Only the first 10 cells are coordinate values.
                    for index, value in enumerate(td_data):
                        if index == 10:
                            break
                        localtion_temp.append(float(value))

                count += 1
        except Exception:
            print("爬取网页时候出现错误！")
            # Persist what was collected so far before bailing out.
            wb.save("getURL_Location_Data.xlsx")
            return

        wv.append(localtion_temp)

    # Bug fix: save via the Workbook — Worksheet has no save() method, so
    # the original `wv.save(...)` raised and the file was never written.
    wb.save("getURL_Location_Data.xlsx")
    print("网页数据爬取完成！")

    return


def get_intersecting_shapefile(shp_path, location_data_path, url_data_path, prj_path, lat_min=-81, FID_Boundary=(0, 0), area_over=0.2, num_threads=8):
    """For each shapefile feature in the FID range, find the crawled
    ShadowCam footprints that intersect it and write the matches (with
    their URLs) to an Excel file named after the FID range.

    Args:
        shp_path: Input shapefile of PSR polygons.
        location_data_path: .xlsx of crawled footprint corner coordinates.
        url_data_path: .xlsx mapping image IDs ("影像编号") to URLs.
        prj_path: .prj file holding the target projection WKT.
        lat_min: Footprints whose center latitude is >= lat_min are skipped.
        FID_Boundary: (start, end) inclusive range of feature indices.
        area_over: Unused; kept for interface compatibility.
        num_threads: Worker threads per feature (default 8, matching the
            original hard-coded value).
    """
    shapefile = gpd.read_file(shp_path)

    # Image-ID -> URL lookup table.
    url_data = pd.read_excel(url_data_path)

    # Read the target projection (e.g. Moon 2000 south-pole) from the .prj.
    with open(prj_path, 'r') as prj_file:
        prj_text = prj_file.read().strip()
    crs_target = CRS.from_string(prj_text)
    # Remember the source CRS so footprints can be reprojected the same way.
    original_crs = shapefile.crs

    # Reproject and normalize every geometry to a MultiPolygon.
    shapefile = shapefile.to_crs(crs_target)
    shapefile["geometry"] = shapefile["geometry"].apply(
        lambda geom: MultiPolygon([geom]) if isinstance(geom, Polygon) else geom
    )

    location_data = pd.read_excel(location_data_path)

    with tqdm(total=(FID_Boundary[1] - FID_Boundary[0] + 1), desc="处理进度：") as pbar:

        output_data = []

        for idx, row in shapefile.iterrows():

            if idx < FID_Boundary[0] or idx > FID_Boundary[1]:
                continue
            geom = row['geometry']
            gid = row['gid']
            area_km2 = row['area_km2']
            name = row['name']
            centroid_x, centroid_y = geom.centroid.x, geom.centroid.y
            boundary = geom.boundary

            # Rebuild the feature as a single Polygon from its longest
            # boundary ring so the intersection test uses the outer shell.
            if isinstance(boundary, MultiLineString):
                largest_line = max(boundary.geoms, key=lambda line: line.length)
                coords = list(largest_line.coords)
                coords.append(coords[0])  # close the ring
                geom = Polygon(coords)
            elif isinstance(boundary, LineString):
                # Bug fix: a geometry's boundary is a (Multi)LineString,
                # never a Polygon, so the original
                # `isinstance(boundary, Polygon)` branch was unreachable.
                geom = Polygon(boundary.coords)

            # Split the footprint table into num_threads roughly equal
            # chunks; the last chunk absorbs the remainder.
            chunk_size = len(location_data) // num_threads
            chunks = []
            for i in range(num_threads):
                start = i * chunk_size
                end = start + chunk_size if i < num_threads - 1 else len(location_data)
                chunks.append(location_data.iloc[start:end])

            # Launch one worker thread per chunk.
            threads = []
            for chunk in chunks:
                thread = threading.Thread(
                    target=process_location_chunk,
                    args=(chunk, geom, original_crs, crs_target, url_data, gid, area_km2, name, centroid_x, centroid_y, lat_min)
                )
                threads.append(thread)
                thread.start()

            # Wait for all workers, then drain their results.
            for thread in threads:
                thread.join()

            while not output_queue.empty():
                output_data.append(output_queue.get())

            pbar.update(1)

    # Save all matches to an Excel file named after the FID range.
    output_df = pd.DataFrame(output_data)
    output_path = f'output_result-{FID_Boundary[0]}-{FID_Boundary[1]}.xlsx'
    output_df.to_excel(output_path, index=False)

    # Bug fix: report the actual output filename (the original message
    # always claimed "output_result.xlsx").
    print(f"处理完成，结果已保存到{output_path}")


#封装为线程函数
# Worker function executed by each thread.
def process_location_chunk(chunk, geom, original_crs, crs_target, url_data, gid, area_km2, name, centroid_x, centroid_y, lat_min):
    """Test every footprint row in ``chunk`` against ``geom`` and enqueue a
    result record for each intersecting footprint with a known URL.

    Matches are pushed onto the module-level ``output_queue`` as dicts
    carrying the feature attributes, the image ID, and its URL.
    """
    for _, loc_row in chunk.iterrows():
        # Skip footprints whose center latitude is at or above the cutoff.
        if loc_row['Center Latitude'] >= lat_min:
            continue

        # Footprint quadrilateral in the source CRS: UL -> UR -> LR -> LL.
        corner_coords = [
            (loc_row['Upper Left Longitude'], loc_row['Upper Left Latitude']),
            (loc_row['Upper Right Longitude'], loc_row['Upper Right Latitude']),
            (loc_row['Lower Right Longitude'], loc_row['Lower Right Latitude']),
            (loc_row['Lower Left Longitude'], loc_row['Lower Left Latitude']),
        ]

        # Reproject the footprint into the same CRS as the feature geometry.
        footprint = gpd.GeoDataFrame(geometry=[Polygon(corner_coords)], crs=original_crs)
        footprint = footprint.to_crs(crs_target)

        # Only intersecting footprints are of interest.
        if not geom.intersects(footprint.geometry[0]):
            continue

        # Look up the URL for this image ID; no URL means no record.
        image_id = loc_row['影像编号']
        url_row = url_data[url_data['影像编号'] == image_id]
        if url_row.empty:
            continue

        output_queue.put({
            'gid': gid,
            'area_km2': area_km2,
            'name': name,
            'CENTROID_X': centroid_x,
            'CENTROID_Y': centroid_y,
            '影像编号': loc_row['影像编号'],
            'URL': url_row.iloc[0]['URL'],
        })

if __name__ == "__main__":
    # 转换shp格式数据为xls
    # shp 的文件位置
    # 首先要去读取PR84的shp文件找到对应坑的位置
    shp_path = "./PSR84/PSR_84.shp"
    bdf_path = "./PSR84/PSR_84.dbf"
    # shp2xlsData(shp_path,bdf_path)

    # 然后根据坑的经纬度信息去找有tif影像能覆盖到他的影像ID

    xlsx_path = "./PSR_84_New1.xlsx"
    shadow_id_path = "./shadow_id_URL_tif.xlsx"
    #
    # shadow_id_array = getShadowId(shadow_id_path)
    # # 你要爬取的影像 序号，0到666就是代表爬取前 666条数据 已知：总共有10447条数据
    # Numbers = (0, 666)
    #
    # PutShadowID(shadow_id_array, xlsx_path, Numbers)
    Location_Data_path = "./getURL_Location_Data_All.xlsx"
    prj_path="./Moon2000_spole.prj"
    FID_Boundary = (351,556)
    # 根据影像ID 从爬取的所有URL对应表中查询其对应的URL 放到新PSR_84.xls中
    get_intersecting_shapefile(shp_path, Location_Data_path, shadow_id_path,prj_path,-81,FID_Boundary)
