import geopandas as gpd
from openpyxl import Workbook,load_workbook
from tqdm import tqdm
import fiona
import pandas as pd
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse



def shp2xlsData(path, bdf_path):
    """Convert a shapefile's attribute table to "PSR_84_New.xlsx".

    Each feature is written as one row: its attribute values (geometry
    column excluded) followed by the four corner coordinates of the
    geometry's axis-aligned bounding box.

    Args:
        path: path to the .shp file to read.
        bdf_path: path to the companion .dbf file. Currently unused; kept
            so existing callers' signatures still work.

    Side effects:
        Writes "PSR_84_New.xlsx" in the current working directory.
    """
    wb = Workbook()
    ws = wb.active

    ws.append(['gid', 'area_km2', 'name', 'lon', 'lat', "Lower Left Longitude",
               "Lower Left Latitude", "Lower Right Longitude", "Lower Right Latitude",
               "Upper Left Longitude", "Upper Left Latitude", "Upper Right Longitude", "Upper Right Latitude"
               ])

    shapefile = gpd.read_file(path)
    shapefile.info()

    # Locate the geometry column by name instead of assuming it sits at a
    # fixed position (the original hard-coded index 5, which breaks on any
    # shapefile with a different column layout).
    columns = shapefile.columns.tolist()
    print(columns)
    geom_col = shapefile.geometry.name
    attr_columns = [col for col in columns if col != geom_col]

    for _, row in tqdm(shapefile.iterrows(), total=len(shapefile), desc="写入Excel"):
        row_data = [row[col] for col in attr_columns]

        # Corners of the geometry's maximum bounding rectangle.
        minx, miny, maxx, maxy = row[geom_col].bounds
        corners = [
            (minx, miny),  # lower-left
            (maxx, miny),  # lower-right
            (maxx, maxy),  # upper-right
            (minx, maxy),  # upper-left
        ]
        for x, y in corners:
            row_data.append(x)
            row_data.append(y)

        ws.append(row_data)

    wb.save("PSR_84_New.xlsx")
    print("已经将shp数据转换为xlsx")


def getShadowId(shadow_id_path):
    """Read shadow image ID rows from an xlsx file.

    Args:
        shadow_id_path: path to the workbook; the active sheet's first row
            is treated as a header and skipped.

    Returns:
        A list of rows, each a list of cell values (the first cell is the
        shadow image ID used by PutShadowID).
    """
    wb = load_workbook(shadow_id_path, read_only=True)
    try:
        ws = wb.active
        # min_row=2 skips the header row; values_only avoids constructing
        # Cell objects for every entry.
        shadow_id_array = [list(row) for row in ws.iter_rows(min_row=2, values_only=True)]
    finally:
        # Read-only workbooks keep the underlying file handle open until
        # explicitly closed — the original leaked it.
        wb.close()

    return shadow_id_array

def PutShadowID(shadow_id_array, xlsx_path, Numbers):
    """Scrape corner/center coordinates for each shadow image ID into xlsx.

    For every row in shadow_id_array, fetches the ShadowCam metadata page
    for the image ID (first element), extracts the ten coordinate values
    from the 8th <tbody> table on the page, and appends one row per image
    to a new workbook saved as "getURL_Location_Data-<start>-<n>.xlsx".

    Args:
        shadow_id_array: rows whose first element is the image ID
            (see getShadowId).
        xlsx_path: unused; kept for backward compatibility with callers.
        Numbers: (start, end) tuple; start is baked into the output
            filename so separate runs don't overwrite each other.

    Failed or unreachable pages are logged and skipped so one bad request
    no longer aborts the whole run (the original returned without saving,
    losing everything scraped so far). The workbook is always saved.
    """
    base_url = "https://data.im-ldi.com/mds/shadowcam_published/"
    headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Connection": "keep-alive",
}

    # The coordinate table is the 8th <tbody> on the page (0-based index 7);
    # only its first 10 <td> values are the coordinates we need.
    TARGET_TBODY_INDEX = 7
    NUM_COORD_FIELDS = 10

    wb = Workbook()
    wv = wb.active

    wv.append(["影像编号",
                "Center Latitude",
                "Center Longitude",
                "Lower Left Latitude",
                "Lower Left Longitude",
                "Lower Right Latitude",
                "Lower Right Longitude",
                "Upper Left Latitude",
                "Upper Left Longitude",
                "Upper Right Latitude",
                "Upper Right Longitude"
                ])

    countNum = 0
    for shadow in tqdm(shadow_id_array, total=len(shadow_id_array), desc="爬取影像进度："):

        url = base_url + shadow[0]

        try:
            # Timeout keeps a single slow/unresponsive server from hanging
            # the entire scrape.
            response = requests.get(url, headers=headers, timeout=30)
            if response.status_code != 200:
                print("无法访问！")
                continue

            soup = BeautifulSoup(response.text, 'html.parser')
            localtion_temp = [shadow[0]]

            for count, tbody in enumerate(soup.find_all('tbody')):
                if count == TARGET_TBODY_INDEX:
                    td_data = [td.get_text(strip=True) for td in tbody.find_all('td')]
                    print(td_data)
                    for value in td_data[:NUM_COORD_FIELDS]:
                        localtion_temp.append(float(value))

            countNum += 1
        except Exception:
            # Log and move on — do not abort the run for one bad page.
            print("爬取网页时候出现错误！")
            continue

        wv.append(localtion_temp)

    num = countNum + Numbers[0]
    wb.save(f"getURL_Location_Data-{Numbers[0]}-{num}.xlsx")
    print("网页数据爬取完成！")

    return


if __name__ == "__main__":

    # Step 1 (one-off, already done): convert the PSR_84 shapefile to xlsx
    # so the crater positions are available as a table.
    # shp_path = "./PSR84/PSR_84.shp"
    # bdf_path = "./PSR84/PSR_84.dbf"
    # shp2xlsData(shp_path, bdf_path)

    # Step 2: for each crater's lat/lon, find the tif image IDs whose
    # footprint can cover it.
    psr_table_path = "./PSR_84_New.xlsx"
    id_table_path = "./shadow_id_URL_tif.xlsx"

    id_rows = getShadowId(id_table_path)

    # Row range to scrape this run — e.g. (0, 666) scrapes the first 666 of
    # the 10447 total rows. This tuple is the only thing to edit between runs.
    scrape_range = (7201, 8000)

    PutShadowID(id_rows[scrape_range[0]:scrape_range[1]], psr_table_path, scrape_range)

    # Step 3 (TODO): look up each scraped image ID's URL in the URL table
    # and write it into the new PSR_84 xlsx.