# 导入必要的库
import re
import urllib.request
from urllib.error import URLError, HTTPError
import requests
import json
import csv
import pandas as pd

# Raise the CSV field size limit so large page bodies fit in one cell
csv.field_size_limit(1000000)  # set to 1 MB; adjust as needed

# Target URLs to fetch
urls = [
    "https://bj.fang.com/quanwangso/search.html?city=bj&refer=sy_seach",
    "https://www.guazi.com/bj/buy/"
]

# Strip characters that are illegal in file names
def sanitize_filename(filename):
    """Return *filename* with Windows-illegal characters removed.

    Drops every occurrence of \\ / * ? : " < > | so the result can be
    used as a file name on common filesystems.
    """
    illegal = '\\/*?:"<>|'
    return filename.translate(str.maketrans('', '', illegal))

# Fetch a URL with the urllib standard library
def access_with_urllib(url, timeout=10):
    """Fetch *url* via urllib, print a summary and save the body to a file.

    Args:
        url: Address to fetch.
        timeout: Socket timeout in seconds (default 10). Added so a dead
            host cannot hang the script forever; existing callers are
            unaffected.

    Prints the status code, headers and the first 100 characters of the
    body, then writes the full body to '<last-path-segment>_urllib.html'.
    HTTP and URL errors are reported instead of propagating.
    """
    try:
        # Context manager guarantees the connection is closed
        # (the original never closed the response object).
        with urllib.request.urlopen(url, timeout=timeout) as response:
            status_code = response.getcode()
            headers = response.headers
            # NOTE(review): assumes the page is UTF-8 — confirm, or derive
            # the charset from response.headers.get_content_charset().
            content = response.read().decode('utf-8')

        print(f"URL: {url} (urllib)")
        print(f"Status Code: {status_code}")
        print(f"Headers: {headers}")
        print(f"Content (first 100 characters): {content[:100]}")

        # Build a filesystem-safe name from the last URL segment
        filename = sanitize_filename(f"{url.split('/')[-1]}_urllib.html")

        # Save the body to the generated file
        with open(filename, 'w', encoding='utf-8') as file:
            file.write(content)

    except HTTPError as e:
        print(f"HTTP Error: {e.code} - {e.reason}")
    except URLError as e:
        print(f"URL Error: {e.reason}")

# Fetch a URL with the requests library
def access_with_requests(url, timeout=10):
    """Fetch *url* via requests, print a summary and save the body to a file.

    Args:
        url: Address to fetch.
        timeout: Request timeout in seconds (default 10). requests.get
            without a timeout blocks indefinitely on an unresponsive
            host; the default keeps existing callers working.

    Prints the status code, headers and the first 100 characters of the
    body, then writes the full body to '<last-path-segment>_requests.html'.
    Request errors (connection, timeout, etc.) are reported instead of
    propagating.
    """
    try:
        response = requests.get(url, timeout=timeout)

        status_code = response.status_code
        headers = response.headers
        content = response.text

        print(f"URL: {url} (requests)")
        print(f"Status Code: {status_code}")
        print(f"Headers: {headers}")
        print(f"Content (first 100 characters): {content[:100]}")

        # Build a filesystem-safe name from the last URL segment
        filename = sanitize_filename(f"{url.split('/')[-1]}_requests.html")

        # Save the body to the generated file
        with open(filename, 'w', encoding='utf-8') as file:
            file.write(content)

    except requests.exceptions.RequestException as e:
        print(f"Request Error: {e}")

# Persist the collected records as a JSON file
def save_to_json(data, filename):
    """Write *data* to *filename* as pretty-printed UTF-8 JSON.

    Non-ASCII characters are kept as-is (ensure_ascii=False) and the
    output is indented by 4 spaces.
    """
    text = json.dumps(data, ensure_ascii=False, indent=4)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(text)

# Persist the collected records as a CSV file
def save_to_csv(data, filename):
    """Write *data* to *filename* as UTF-8 CSV.

    Emits a 'URL,Content' header row, then one row per record using
    the record's 'url' and 'content' fields.
    """
    with open(filename, 'w', newline='', encoding='utf-8') as fp:
        csv_writer = csv.writer(fp)
        csv_writer.writerow(['URL', 'Content'])
        csv_writer.writerows(
            (record['url'], record['content']) for record in data
        )

# Load previously saved data back from disk
def read_data_from_file(filename):
    """Load records from *filename*, dispatching on its extension.

    Supports .json (list/dict via json.load), .csv (list of dicts via
    csv.DictReader) and .xlsx (list of dicts via pandas).

    Raises:
        ValueError: if the extension is none of the supported formats.
    """
    if filename.endswith('.json'):
        with open(filename, 'r', encoding='utf-8') as fp:
            return json.load(fp)
    if filename.endswith('.csv'):
        with open(filename, 'r', newline='', encoding='utf-8') as fp:
            return list(csv.DictReader(fp))
    if filename.endswith('.xlsx'):
        return pd.read_excel(filename).to_dict(orient='records')
    raise ValueError("Unsupported file format")

# Visit each extracted link and parse the returned page content
def parse_links(data, timeout=10):
    """Fetch every record's URL and print the '.nav > a' categories found.

    Args:
        data: Iterable of mapping records. Accepts records keyed either
            'url' (JSON output from save_to_json) or 'URL' (CSV output
            via DictReader) — the original read only item['URL'] and
            raised KeyError on the JSON-loaded data passed in main().
        timeout: Request timeout in seconds (default 10), so one dead
            host cannot stall the whole loop.

    Request failures and non-200 responses are reported, not raised.
    """
    from pyquery import PyQuery as pq  # local import: only needed here

    for record in data:
        # JSON rows use 'url', CSV rows use 'URL' — accept both
        url = record.get('url') or record.get('URL')
        if not url:
            continue
        try:
            response = requests.get(url, timeout=timeout)
            if response.status_code == 200:
                doc = pq(response.text)
                categories = []
                # Loop variable renamed: the original reused 'item',
                # shadowing the outer record variable.
                for link in doc('.nav > a').items():
                    categories.append({
                        'name': link.text(),
                        'url': link.attr('href'),
                    })
                print(f"Parsed categories for {url}: {categories}")
            else:
                print(f"Failed to fetch {url}, status code: {response.status_code}")
        except requests.exceptions.RequestException as e:
            print(f"Request Error for {url}: {e}")

# Entry point: fetch, save, reload and parse the configured URLs
def main():
    """Fetch every URL in `urls`, persist the pages, then parse them.

    Pipeline: fetch each URL (also saved to per-URL files by
    access_with_requests), collect the bodies, save them as data.json
    and data.csv, reload both files, and run parse_links over each.
    A failing URL is skipped instead of crashing the whole run (the
    original bare requests.get had no timeout and no error handling).
    """
    data = []
    for url in urls:
        access_with_requests(url)
        try:
            # NOTE(review): this re-downloads what access_with_requests
            # already fetched; acceptable for a demo script, but the two
            # could be merged by having access_with_requests return the
            # response.
            response = requests.get(url, timeout=10)
        except requests.exceptions.RequestException as e:
            print(f"Request Error for {url}: {e}")
            continue
        if response.status_code == 200:
            data.append({'url': url, 'content': response.text})

    # Persist the collected pages in both formats
    save_to_json(data, 'data.json')
    save_to_csv(data, 'data.csv')

    # Round-trip the saved files back into memory
    json_data = read_data_from_file('data.json')
    csv_data = read_data_from_file('data.csv')

    # Parse the links from both loaded datasets
    parse_links(json_data)
    parse_links(csv_data)

if __name__ == "__main__":
    main()


