import requests
from bs4 import BeautifulSoup
import json
import re
import os
import test
import time
import random
from queue import Queue
import threading
# Extract the floor-plan image URL from a detail page.
def get_house_img_url(text):
    """Return the floor-plan ('户型图') image URL, or '' when the page has none."""
    document = BeautifulSoup(text, 'html.parser')
    node = document.find("li", attrs={'data-desc': '户型图'})
    return '' if node is None else node['data-pic']

# Extract the district slug from a detail page.
def get_house_area(text):
    """Return the district slug (second path segment of the area link).

    BUG FIX: the original chained `.find(...)` calls and `match.group(2)`
    unconditionally, raising AttributeError on pages missing the expected
    markup or with an unexpected href shape. Returns '' in those cases.
    """
    soup = BeautifulSoup(text, 'html.parser')
    area_div = soup.find("div", class_="areaName")
    if area_div is None:
        return ''
    info_span = area_div.find('span', class_='info')
    link = info_span.find('a') if info_span is not None else None
    if link is None or not link.has_attr('href'):
        return ''
    match = re.search(r'/([^/]+)/([^/]+)/', link['href'])
    return match.group(2) if match else ''


# Extract the detail page's resblock id.
def get_house_id(text):
    """Return the `data-lj_action_resblock_id` value of the first div carrying it."""
    document = BeautifulSoup(text, 'html.parser')
    marker = document.find('div', attrs={'data-lj_action_resblock_id': True})
    return marker['data-lj_action_resblock_id']

def get_house_info(text):
    """Parse a listing detail page and persist the extracted data to disk.

    Writes <test.base_dir>/<area>/<resblock_id>.json with house/community info
    and, when a floor-plan image URL is found, downloads it alongside as .png.
    Fields stay '' when the corresponding markup is absent.
    """
    soup = BeautifulSoup(text, 'html.parser')
    # Result skeleton (previously built by json.loads on a literal string).
    data_dict = {
        "houseInfo": {
            "mainInfo": "",
            "subInfo": "",
            "sourceInfo": "",
        },
        "aroundInfo": {
            "communityName": "",
            "areaName": "",
        },
        "imgBase64": "",
        "imgUrl": "",
    }

    # sourceInfo: breadcrumb links joined with '-'
    container = soup.find('div', attrs={'mod-id': 'lj-common-bread'})
    if container:
        data_dict['houseInfo']['sourceInfo'] = '-'.join(
            a.text for a in container.find_all('a'))

    community_div = soup.find('div', class_='communityName')
    if community_div:
        data_dict['aroundInfo']['communityName'] = community_div.find('a', class_='info').text

    # mainInfo / subInfo / floor-plan image URL.
    # BUG FIX: image_url was only bound inside the branch below, causing a
    # NameError at the download check when the page had no houseInfo div.
    image_url = ''
    room_div = soup.find('div', class_='houseInfo')
    if room_div:
        data_dict['houseInfo']['mainInfo'] = '-'.join(
            d.text for d in room_div.find_all('div', class_='mainInfo'))
        data_dict['houseInfo']['subInfo'] = '/'.join(
            d.text for d in room_div.find_all('div', class_='subInfo'))
        image_url = get_house_img_url(text)
        data_dict['imgUrl'] = image_url

    house_id = get_house_id(text)  # renamed: `id` shadowed the builtin
    folder_name = get_house_area(text)
    full_path = os.path.join(test.base_dir, folder_name)
    os.makedirs(full_path, exist_ok=True)  # exist_ok=True: no error when the folder exists
    file_path = os.path.join(full_path, house_id)
    with open(file_path + '.json', 'w', encoding='utf-8') as fp:
        json.dump(data_dict, fp, ensure_ascii=False, indent=4)

    if image_url != '':
        img_path = file_path + '.png'
        # Download the floor-plan image; a timeout keeps worker threads
        # from hanging forever on a stalled connection.
        response = requests.get(image_url, timeout=30)
        if response.status_code == 200:
            with open(img_path, 'wb') as fp:
                fp.write(response.content)
        else:
            print(f"下载失败，状态码: {response.status_code}")

def save_info_by_id(id, folder_name):
    """Fetch and persist one listing unless its JSON file already exists."""
    if is_exists(id, folder_name):
        return
    test.random_sleep(0.1, 2)  # throttle to avoid hammering the server
    detail_url = test.detail_base_url + str(id) + '.html'
    get_house_info(test.get_page_text(detail_url))

def get_one_url_info(url):
    """Collect every listing id reachable from a paginated listing URL.

    Returns a dict with the flattened id list ('idList'), the area folder
    name derived from the first listing's detail page ('folderName'), and
    the total id count ('totalLength').
    """
    page_text = test.get_page_text(url)
    url_list = test.get_all_page_url(page_text)
    ids = []
    for u in url_list:
        text = test.get_page_text(test.base_url + u)
        ids.append(test.get_id_list(text))
    # BUG FIX: the flatten call was indented inside the loop, re-flattening
    # the accumulated lists on every page; do it once after the loop.
    id_list = test.expand_list(ids)
    detail_text = test.get_page_text(test.detail_base_url + str(id_list[0]) + '.html')
    folder_name = get_house_area(detail_text)
    return {
        'idList': id_list,
        'folderName': folder_name,
        'totalLength': len(id_list),
    }

def is_exists(id, folder_name):
    """Return True when the listing's JSON file has already been saved."""
    target = test.base_dir + '/' + folder_name + '/' + id + '.json'
    return os.path.exists(target)





def multi_thread_url(url):
    """Scrape every listing id found under `url` with a pool of 10 worker threads."""
    dic = get_one_url_info(url)
    id_list = dic['idList']
    folder_name = dic['folderName']
    total_len = dic['totalLength']

    def thread_ids():
        # Workers drain the shared id_list until it is empty.
        while True:
            try:
                # BUG FIX: pop-and-catch replaces the check-then-pop pattern,
                # which raced between threads and could raise an uncaught
                # IndexError when the list drained between the check and pop.
                house_id = id_list.pop()
            except IndexError:
                break
            print(f"线程 {threading.current_thread().name} 正在处理任务: {house_id},完成{str(round((total_len -  len(id_list))/total_len * 100,2))}%")
            try:
                save_info_by_id(house_id, folder_name)
            except TypeError as e:
                # BUG FIX: previously one TypeError ended the whole worker;
                # now it is logged per-item and the worker keeps draining.
                print(f"捕获到异常: {e}")

    num_threads = 10
    threads = []
    for i in range(num_threads):
        thread = threading.Thread(target=thread_ids, name=f"Worker-{i+1}")
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()

def multi_thread_all_url():
    """Process every listing URL concurrently with 20 worker threads."""
    # Renamed from `list`, which shadowed the builtin; also removed a
    # threading.Lock that was created but never acquired.
    url_list = test.get_less_3000_url_list()

    def final():
        while True:
            try:
                # BUG FIX: pop-and-catch replaces check-then-pop, which raced
                # between threads and could raise an uncaught IndexError.
                t_url = url_list.pop()
            except IndexError:
                break
            print(f"当前URL{t_url}")
            try:
                multi_thread_url(t_url)
            except TypeError as e:
                # Log and continue so one bad URL does not end the worker.
                print(f"捕获到异常: {e}")

    num_threads = 20
    threads = []
    for i in range(num_threads):
        thread = threading.Thread(target=final)
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
if __name__ == "__main__":
    # Entry point: crawl every listing URL with the threaded scraper.
    multi_thread_all_url()
