import json
import os
import re
import urllib.error
import urllib.request
from html.parser import HTMLParser

class PrivacyManifestParser(HTMLParser):
    """HTML parser that scans the privacy-manifest generator page.

    Collects:
      - ``api_categories``: Apple "required reason" API category names
        (from ``<h2 class="api-title" data-api-name="...">`` tags and from
        plain-text mentions of known category names)
      - ``api_reasons``: mapping of category name -> list of reason strings
        (text found inside ``<div class="api-reason">`` blocks)
      - ``data_types``: privacy "nutrition label" data types mentioned
      - ``tracking_reasons``: tracking purposes mentioned
    """

    # Known Apple required-reason API categories to match in page text.
    KNOWN_API_CATEGORIES = [
        'File timestamp APIs',
        'System boot time APIs',
        'Disk space APIs',
        'Active keyboard APIs',
        'User Defaults APIs',
    ]

    # Privacy nutrition-label data types to match in page text.
    KNOWN_DATA_TYPES = [
        'Name', 'Email address', 'Phone number', 'Physical address',
        'Health', 'Fitness', 'Payment info', 'Credit info',
        'Precise location', 'Coarse location', 'Contacts',
        'Photos or videos', 'Audio data', 'Customer support',
        'User ID', 'Device ID', 'Purchase history',
        'Crash data', 'Performance data',
    ]

    # Tracking purposes to match in page text.
    KNOWN_TRACKING_REASONS = [
        '3rd party ads', 'Developer ads', 'Analytics',
        'Personalization', 'App functionality', 'Other',
    ]

    def __init__(self):
        # HTMLParser requires its own initialization before feed() is used.
        super().__init__()
        self.api_categories = []    # ordered, de-duplicated category names
        self.api_reasons = {}       # category name -> list of reason strings
        self.data_types = []        # data types seen so far
        self.tracking_reasons = []  # tracking reasons seen so far
        self.current_api = None     # category whose reasons are being collected
        self.in_reason = False      # inside a <div class="api-reason"> block

    def handle_starttag(self, tag, attrs):
        """Track API-title headings and the start of reason blocks."""
        attrs = dict(attrs)
        if tag == 'h2':
            # A new heading ends any previous category context.
            self.current_api = None
            # `or ''` guards against a bare `class` attribute (value None).
            if 'api-title' in (attrs.get('class') or ''):
                self.current_api = attrs.get('data-api-name')
                if self.current_api and self.current_api not in self.api_categories:
                    self.api_categories.append(self.current_api)
                    self.api_reasons[self.current_api] = []
        elif tag == 'div' and 'api-reason' in (attrs.get('class') or ''):
            self.in_reason = True

    def handle_data(self, data):
        """Collect reason text and plain-text mentions of known keywords."""
        data = data.strip()
        if not data:
            return

        # Text inside a reason block attaches to the current API category.
        if self.in_reason and self.current_api:
            if data not in self.api_reasons[self.current_api]:
                self.api_reasons[self.current_api].append(data)

        # Plain-text mention of a known API category starts a new context.
        if data in self.KNOWN_API_CATEGORIES and data not in self.api_categories:
            self.api_categories.append(data)
            self.current_api = data
            self.api_reasons[data] = []

        # Record known data types and tracking reasons, de-duplicated.
        if data in self.KNOWN_DATA_TYPES and data not in self.data_types:
            self.data_types.append(data)

        if data in self.KNOWN_TRACKING_REASONS and data not in self.tracking_reasons:
            self.tracking_reasons.append(data)

    def handle_endtag(self, tag):
        """Close the current reason block when its <div> ends."""
        if tag == 'div' and self.in_reason:
            self.in_reason = False

    def parse(self, html):
        """Feed *html* through the parser, populating the result lists."""
        self.feed(html)

def fetch_website(url, retries=1):
    """Fetch *url* and return the response body as text, or None on failure.

    Sends a browser-like User-Agent. On a URLError the request is retried
    up to *retries* more times (default 1, matching the original behavior).
    The body is decoded with the charset declared by the server, falling
    back to UTF-8.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
    }

    try:
        req = urllib.request.Request(url, headers=headers)
    except Exception as e:
        # e.g. ValueError for a malformed URL
        print(f"获取网站失败: {e}")
        return None

    for attempt in range(retries + 1):
        try:
            with urllib.request.urlopen(req, timeout=10) as response:
                # Prefer the server-declared charset; fall back to UTF-8.
                charset = response.headers.get_content_charset() or 'utf-8'
                return response.read().decode(charset)
        except urllib.error.URLError as e:
            if attempt < retries:
                print(f"连接失败: {e}")
            else:
                print(f"重试失败: {e}")
                return None
        except Exception as e:
            print(f"获取网站失败: {e}")
            return None
    return None

def analyze_privacy_manifest(url):
    """Download the privacy-manifest generator page at *url* and analyze it.

    Saves the raw HTML to ``downloads/original.html`` and a structured
    summary to ``downloads/analysis.json``, prints the summary, and
    returns the analysis dict (or None if the page could not be fetched).
    """
    print(f"开始分析隐私清单生成器网站: {url}")

    # 创建输出目录
    output_dir = "downloads"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # 获取网站内容
    html = fetch_website(url)
    if not html:
        return

    # 保存原始HTML
    with open(os.path.join(output_dir, "original.html"), "w", encoding="utf-8") as f:
        f.write(html)

    # 解析内容
    parser = PrivacyManifestParser()
    parser.parse(html)

    # 整理分析结果
    analysis = {
        "API类别": [
            {
                "名称": api,
                "访问原因": parser.api_reasons.get(api, [])
            } for api in parser.api_categories
        ],
        "数据类型": parser.data_types,
        "跟踪原因": parser.tracking_reasons,
        "使用说明": [
            "从2024年5月1日起，苹果要求所有使用特定API的应用程序必须在隐私清单文件中声明其使用情况",
            "选择左侧表单中的选项来配置你的API使用情况和隐私营养标签",
            "右侧的XML会自动更新以反映你的选择"
        ]
    }

    # 保存分析结果
    with open(os.path.join(output_dir, "analysis.json"), "w", encoding="utf-8") as f:
        json.dump(analysis, f, ensure_ascii=False, indent=2)

    print("\n网站分析结果:")
    print(f"API类别数量: {len(analysis['API类别'])}")
    print("API类别列表:")
    for api in analysis['API类别']:
        # Bug fix: print the category name, not the whole entry dict.
        print(f"- {api['名称']}")

    print(f"\n数据类型数量: {len(analysis['数据类型'])}")
    print("数据类型列表:")
    for dtype in analysis['数据类型']:
        print(f"- {dtype}")

    print(f"\n跟踪原因数量: {len(analysis['跟踪原因'])}")
    print("跟踪原因列表:")
    for reason in analysis['跟踪原因']:
        print(f"- {reason}")

    print(f"\n分析结果已保存到 {output_dir} 目录")
    return analysis

if __name__ == "__main__":
    # Entry point: analyze the public privacy-manifest generator site.
    analysis = analyze_privacy_manifest("https://www.privacymanifest.dev/")
