from flask import Flask, request, jsonify, send_from_directory
import sqlite3
import hashlib
import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import time
import re

app = Flask(__name__)

# Serve static files from the project root directory
app.static_folder = '.'

# Import and register the ID-photo processing API blueprint
from idphoto.idphoto_api import idphoto_bp
app.register_blueprint(idphoto_bp)

# Initialize the database schema
def init_db():
    """Create the users, crawler_resources and user_favorites tables if absent.

    Safe to call repeatedly (CREATE TABLE IF NOT EXISTS). The connection is
    closed even if a statement raises.
    """
    conn = sqlite3.connect('users.db')
    try:
        cursor = conn.cursor()

        # Registered accounts (password column holds a hex digest, not plaintext)
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS users (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                username TEXT UNIQUE NOT NULL,
                password TEXT NOT NULL
            )
        ''')

        # Crawled resources; content is either extracted text or a file path
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS crawler_resources (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                url TEXT NOT NULL,
                title TEXT,
                content_type TEXT NOT NULL,  -- 'text', 'image', 'video'
                content TEXT,  -- extracted text, or path of the downloaded file
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        # User favorites (no FK constraints declared; relationships by id only)
        cursor.execute('''
            CREATE TABLE IF NOT EXISTS user_favorites (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                user_id INTEGER NOT NULL,
                resource_id INTEGER NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

        conn.commit()
    finally:
        conn.close()

# Open a database connection
def get_db_connection():
    """Return a connection to users.db whose rows support name-based access."""
    connection = sqlite3.connect('users.db')
    connection.row_factory = sqlite3.Row
    return connection

# User registration endpoint
@app.route('/api/register', methods=['POST'])
def register():
    """Create a new account from JSON {username, password}.

    Returns 200 on success, 400 on missing fields / duplicate username,
    500 on unexpected errors.
    """
    try:
        # silent=True: a missing/invalid JSON body yields None instead of
        # raising, so we answer 400 rather than a generic 500.
        data = request.get_json(silent=True) or {}
        username = data.get('username')
        password = data.get('password')

        if not username or not password:
            return jsonify({'code': 400, 'message': '用户名和密码不能为空'}), 400

        # NOTE(security): MD5 is not a safe password hash. It is kept here only
        # because /api/login and existing rows use the same digest — migrate
        # both endpoints together to a real KDF (e.g. hashlib.scrypt).
        hashed_password = hashlib.md5(password.encode()).hexdigest()

        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            # Rely on the UNIQUE constraint instead of check-then-insert,
            # which was racy under concurrent registrations.
            try:
                cursor.execute('INSERT INTO users (username, password) VALUES (?, ?)',
                              (username, hashed_password))
                conn.commit()
            except sqlite3.IntegrityError:
                return jsonify({'code': 400, 'message': '用户名已存在'}), 400
        finally:
            conn.close()

        return jsonify({'code': 200, 'message': '注册成功'}), 200
    except Exception as e:
        print(f"注册错误: {e}")
        return jsonify({'code': 500, 'message': '服务器内部错误'}), 500

# User login endpoint
@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate a user from JSON {username, password}.

    Returns 200 on success, 401 on wrong credentials, 400 on missing fields.
    """
    try:
        # silent=True avoids an exception (and a 500) on a non-JSON body.
        data = request.get_json(silent=True) or {}
        username = data.get('username')
        password = data.get('password')

        if not username or not password:
            return jsonify({'code': 400, 'message': '用户名和密码不能为空'}), 400

        # NOTE(security): MD5 matches what /api/register stores; migrate both
        # endpoints together to a proper password KDF.
        hashed_password = hashlib.md5(password.encode()).hexdigest()

        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('SELECT id FROM users WHERE username = ? AND password = ?',
                          (username, hashed_password))
            user = cursor.fetchone()
        finally:
            # Close even if the query raises (previously leaked on error).
            conn.close()

        if user:
            return jsonify({'code': 200, 'message': '登录成功'}), 200
        else:
            return jsonify({'code': 401, 'message': '用户名或密码错误'}), 401
    except Exception as e:
        print(f"登录错误: {e}")
        return jsonify({'code': 500, 'message': '服务器内部错误'}), 500

# Crawler endpoint - page text
@app.route('/api/crawler/text', methods=['POST'])
def crawl_text():
    """Fetch a page, extract its visible text and store it in crawler_resources.

    Expects JSON {url}; returns the new resource id, title and type on success.
    """
    try:
        data = request.get_json(silent=True) or {}
        url = data.get('url')

        if not url:
            return jsonify({'code': 400, 'message': 'URL不能为空'}), 400

        # Browser-like UA: some sites block the default requests UA.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # soup.title.string can be None for an empty <title>; fall back then too.
        title = soup.title.string.strip() if soup.title and soup.title.string else "无标题"

        # Drop non-content tags before extracting text.
        for tag in soup(["script", "style"]):
            tag.decompose()

        # Normalize whitespace: strip lines, split double-space phrases, rejoin.
        text_content = soup.get_text()
        lines = (line.strip() for line in text_content.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
        text_content = ' '.join(chunk for chunk in chunks if chunk)

        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO crawler_resources (url, title, content_type, content)
                VALUES (?, ?, ?, ?)
            ''', (url, title, 'text', text_content))
            conn.commit()
            resource_id = cursor.lastrowid
        finally:
            # Close even on failure (previously leaked on error).
            conn.close()

        return jsonify({
            'code': 200,
            'message': '文字爬取成功',
            'data': {
                'id': resource_id,
                'url': url,
                'title': title,
                'content_type': 'text'
            }
        }), 200
    except Exception as e:
        print(f"文字爬取错误: {e}")
        return jsonify({'code': 500, 'message': f'爬取失败: {str(e)}'}), 500

# Crawler endpoint - images
@app.route('/api/crawler/image', methods=['POST'])
def crawl_image():
    """Download every <img> on the given page into images/ and record each file.

    Expects JSON {url}; returns how many images were saved. Individual image
    failures are logged and skipped rather than failing the whole request.
    """
    try:
        data = request.get_json(silent=True) or {}
        url = data.get('url')

        if not url:
            return jsonify({'code': 400, 'message': 'URL不能为空'}), 400

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # Guard against empty <title> (soup.title.string may be None).
        title = soup.title.string.strip() if soup.title and soup.title.string else "无标题"

        # Ensure the download directory exists.
        os.makedirs('images', exist_ok=True)

        allowed_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp'}
        image_count = 0

        # One connection for the whole batch instead of one per image.
        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            for img in soup.find_all('img'):
                img_url = img.get('src')
                # Skip missing src and inline data: URIs (not fetchable via HTTP).
                if not img_url or img_url.startswith('data:'):
                    continue

                # Resolve relative URLs against the page URL.
                img_url = urljoin(url, img_url)

                try:
                    img_response = requests.get(img_url, headers=headers, timeout=10)
                    if img_response.status_code != 200:
                        continue

                    # Derive the extension from the URL path; default to .jpg.
                    file_extension = os.path.splitext(urlparse(img_url).path)[1]
                    if file_extension.lower() not in allowed_extensions:
                        file_extension = '.jpg'

                    # Timestamp + counter keeps names unique within a run.
                    filename = f"images/image_{int(time.time() * 1000)}_{image_count}{file_extension}"

                    with open(filename, 'wb') as f:
                        f.write(img_response.content)

                    cursor.execute('''
                        INSERT INTO crawler_resources (url, title, content_type, content)
                        VALUES (?, ?, ?, ?)
                    ''', (img_url, f"{title}_图片{image_count}", 'image', filename))
                    conn.commit()

                    image_count += 1
                except Exception as e:
                    print(f"下载图片失败 {img_url}: {e}")
                    continue
        finally:
            conn.close()

        return jsonify({
            'code': 200,
            'message': f'图片爬取成功，共爬取{image_count}张图片',
            'data': {
                'count': image_count,
                'title': title,
                'content_type': 'image'
            }
        }), 200
    except Exception as e:
        print(f"图片爬取错误: {e}")
        return jsonify({'code': 500, 'message': f'爬取失败: {str(e)}'}), 500

# Crawler endpoint - videos
@app.route('/api/crawler/video', methods=['POST'])
def crawl_video():
    """Download videos referenced by the given page into videos/ and record them.

    Two discovery passes: (1) <video>/<source> tags, (2) <a href> links whose
    path ends in a known video extension. Expects JSON {url}; returns the count
    of saved videos. Individual download failures are logged and skipped.
    """
    try:
        data = request.get_json(silent=True) or {}
        url = data.get('url')

        if not url:
            return jsonify({'code': 400, 'message': 'URL不能为空'}), 400

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # Guard against empty <title> (soup.title.string may be None).
        title = soup.title.string.strip() if soup.title and soup.title.string else "无标题"

        # Ensure the download directory exists.
        os.makedirs('videos', exist_ok=True)

        allowed_extensions = {'.mp4', '.avi', '.mov', '.wmv', '.flv', '.webm'}
        video_count = 0

        def _fetch_and_store(video_url, label, fail_label):
            """Download one video, save it under videos/ and insert a DB row.

            label / fail_label parametrize the stored title suffix and the log
            prefix so both discovery passes share this code. Errors are logged
            and swallowed (best-effort, like the original per-item handling).
            """
            nonlocal video_count
            try:
                # Videos are large; allow a longer timeout than for pages.
                video_response = requests.get(video_url, headers=headers, timeout=30)
                if video_response.status_code != 200:
                    return

                # Derive the extension from the URL path; default to .mp4.
                file_extension = os.path.splitext(urlparse(video_url).path)[1]
                if file_extension.lower() not in allowed_extensions:
                    file_extension = '.mp4'

                # Timestamp + counter keeps names unique within a run.
                filename = f"videos/video_{int(time.time() * 1000)}_{video_count}{file_extension}"

                with open(filename, 'wb') as f:
                    f.write(video_response.content)

                conn = get_db_connection()
                try:
                    conn.execute('''
                        INSERT INTO crawler_resources (url, title, content_type, content)
                        VALUES (?, ?, ?, ?)
                    ''', (video_url, f"{title}_{label}{video_count}", 'video', filename))
                    conn.commit()
                finally:
                    conn.close()

                video_count += 1
            except Exception as e:
                print(f"{fail_label} {video_url}: {e}")

        # Pass 1: <video> tags, using src or nested <source> elements.
        for video in soup.find_all('video'):
            direct_src = video.get('src')
            if direct_src:
                video_urls = [direct_src]
            else:
                video_urls = [source.get('src') for source in video.find_all('source')
                              if source.get('src')]
            for video_src in video_urls:
                if video_src:
                    _fetch_and_store(urljoin(url, video_src), '视频', '下载视频失败')

        # Pass 2: plain links whose path ends with a video extension.
        video_extensions = r'\.(mp4|avi|mov|wmv|flv|webm)(\?.*)?$'
        for link in soup.find_all('a', href=True):
            if re.search(video_extensions, link['href'], re.IGNORECASE):
                _fetch_and_store(urljoin(url, link['href']), '视频链接', '下载视频链接失败')

        return jsonify({
            'code': 200,
            'message': f'视频爬取成功，共爬取{video_count}个视频',
            'data': {
                'count': video_count,
                'title': title,
                'content_type': 'video'
            }
        }), 200
    except Exception as e:
        print(f"视频爬取错误: {e}")
        return jsonify({'code': 500, 'message': f'爬取失败: {str(e)}'}), 500

# Serve downloaded image and video files
@app.route('/images/<path:filename>')
def serve_image(filename):
    """Serve a crawled image file from the local images/ directory."""
    try:
        file_response = send_from_directory('images', filename)
    except Exception as e:
        print(f"图片访问错误: {e}")
        return jsonify({'code': 404, 'message': '图片不存在'}), 404
    return file_response

@app.route('/videos/<path:filename>')
def serve_video(filename):
    """Serve a crawled video file from the local videos/ directory."""
    try:
        file_response = send_from_directory('videos', filename)
    except Exception as e:
        print(f"视频访问错误: {e}")
        return jsonify({'code': 404, 'message': '视频不存在'}), 404
    return file_response

# Serve processed ID photos
@app.route('/processed_photos/<path:filename>')
def serve_processed_photo(filename):
    """Serve a processed ID photo from the processed_photos/ directory."""
    try:
        file_response = send_from_directory('processed_photos', filename)
    except Exception as e:
        print(f"处理后的图片访问错误: {e}")
        return jsonify({'code': 404, 'message': '图片不存在'}), 404
    return file_response

# List all crawled resources
@app.route('/api/crawler/resources', methods=['GET'])
def get_crawler_resources():
    """Return all crawler resources (metadata only, no content), newest first."""
    try:
        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                SELECT id, url, title, content_type, created_at
                FROM crawler_resources
                ORDER BY created_at DESC
            ''')
            resources = cursor.fetchall()
        finally:
            # Close even if the query raises (previously leaked on error).
            conn.close()

        # sqlite3.Row -> plain dicts so jsonify can serialize them.
        resources_list = [dict(resource) for resource in resources]

        return jsonify({
            'code': 200,
            'message': '获取资源成功',
            'data': resources_list
        }), 200
    except Exception as e:
        print(f"获取资源错误: {e}")
        return jsonify({'code': 500, 'message': f'获取资源失败: {str(e)}'}), 500

# Get details of one resource
@app.route('/api/crawler/resource/<int:resource_id>', methods=['GET'])
def get_crawler_resource_detail(resource_id):
    """Return the full record (including content) for one crawler resource."""
    try:
        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                SELECT id, url, title, content_type, content, created_at
                FROM crawler_resources
                WHERE id = ?
            ''', (resource_id,))
            resource = cursor.fetchone()
        finally:
            # Close even if the query raises (previously leaked on error).
            conn.close()

        if resource is None:
            return jsonify({'code': 404, 'message': '资源不存在'}), 404

        resource_dict = dict(resource)
        # For locally stored files, strip a single leading '/' so the path is
        # relative and matches the /images/... and /videos/... serving routes.
        # (The original had a redundant double startswith('/') check here.)
        if resource_dict['content_type'] in ['image', 'video']:
            content = resource_dict['content']
            if content and not content.startswith('http') and content.startswith('/'):
                resource_dict['content'] = content[1:]
        return jsonify({
            'code': 200,
            'message': '获取资源详情成功',
            'data': resource_dict
        }), 200
    except Exception as e:
        print(f"获取资源详情错误: {e}")
        return jsonify({'code': 500, 'message': f'获取资源详情失败: {str(e)}'}), 500

# Favorite a resource
@app.route('/api/crawler/favorite', methods=['POST'])
def add_to_favorite():
    """Record a favorite for the (currently hard-coded) demo user.

    Expects JSON {resource_id}.
    """
    try:
        data = request.get_json(silent=True) or {}
        resource_id = data.get('resource_id')

        if not resource_id:
            return jsonify({'code': 400, 'message': '资源ID不能为空'}), 400

        # TODO: take the real user id from an authenticated session;
        # user_id=1 is a placeholder (no auth is wired up here).
        conn = get_db_connection()
        try:
            conn.execute('''
                INSERT INTO user_favorites (user_id, resource_id)
                VALUES (?, ?)
            ''', (1, resource_id))
            conn.commit()
        finally:
            # Close even if the insert raises (previously leaked on error).
            conn.close()

        return jsonify({'code': 200, 'message': '添加到收藏列表成功'}), 200
    except Exception as e:
        print(f"添加到收藏列表错误: {e}")
        return jsonify({'code': 500, 'message': f'操作失败: {str(e)}'}), 500

# Delete a single resource
@app.route('/api/crawler/resource/<int:resource_id>', methods=['DELETE'])
def delete_resource(resource_id):
    """Delete one resource row and, for image/video rows, its file on disk."""
    try:
        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            # Fetch the file path before the row disappears.
            cursor.execute('SELECT content, content_type FROM crawler_resources WHERE id = ?', (resource_id,))
            resource = cursor.fetchone()

            if resource is None:
                return jsonify({'code': 404, 'message': '资源不存在'}), 404

            content = resource['content']
            content_type = resource['content_type']

            cursor.execute('DELETE FROM crawler_resources WHERE id = ?', (resource_id,))
            conn.commit()
        finally:
            # Close on every path (previously leaked when returning 404 raised paths failed).
            conn.close()

        # Best-effort file cleanup; a failure here should not fail the request.
        if content_type in ['image', 'video'] and content and os.path.exists(content):
            try:
                os.remove(content)
            except Exception as e:
                print(f"删除文件失败 {content}: {e}")

        return jsonify({'code': 200, 'message': '删除成功'}), 200
    except Exception as e:
        print(f"删除资源错误: {e}")
        return jsonify({'code': 500, 'message': f'删除失败: {str(e)}'}), 500

# Delete multiple resources
@app.route('/api/crawler/resources/delete', methods=['POST'])
def delete_resources():
    """Delete the given resource ids and remove any image/video files they reference.

    Expects JSON {resource_ids: [...]}.
    """
    try:
        data = request.get_json(silent=True) or {}
        resource_ids = data.get('resource_ids')

        # Require a non-empty list: a truthy non-list (e.g. a string) would
        # build a malformed IN (...) clause below.
        if not resource_ids or not isinstance(resource_ids, list):
            return jsonify({'code': 400, 'message': '资源ID列表不能为空'}), 400

        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            # Only '?' placeholders are interpolated; the ids stay bound parameters.
            placeholders = ','.join('?' * len(resource_ids))
            cursor.execute(f'SELECT content, content_type FROM crawler_resources WHERE id IN ({placeholders})', resource_ids)
            resources = cursor.fetchall()

            cursor.execute(f'DELETE FROM crawler_resources WHERE id IN ({placeholders})', resource_ids)
            conn.commit()
        finally:
            # Close even on failure (previously leaked on error).
            conn.close()

        # Best-effort removal of the files backing each deleted row.
        for resource in resources:
            content = resource['content']
            if resource['content_type'] in ['image', 'video'] and content and os.path.exists(content):
                try:
                    os.remove(content)
                except Exception as e:
                    print(f"删除文件失败 {content}: {e}")

        return jsonify({'code': 200, 'message': '删除成功'}), 200
    except Exception as e:
        print(f"删除资源错误: {e}")
        return jsonify({'code': 500, 'message': f'删除失败: {str(e)}'}), 500

# Delete all resources
@app.route('/api/crawler/resources/all', methods=['DELETE'])
def delete_all_resources():
    """Wipe every crawler resource row and clear the images/ and videos/ directories."""
    try:
        conn = get_db_connection()
        try:
            cursor = conn.cursor()
            # Collect file paths before the rows disappear.
            cursor.execute('SELECT content, content_type FROM crawler_resources')
            resources = cursor.fetchall()

            cursor.execute('DELETE FROM crawler_resources')
            conn.commit()
        finally:
            # Close even on failure (previously leaked on error).
            conn.close()

        # Remove the files referenced by the deleted rows (best-effort).
        for resource in resources:
            content = resource['content']
            if resource['content_type'] in ['image', 'video'] and content and os.path.exists(content):
                try:
                    os.remove(content)
                except Exception as e:
                    print(f"删除文件失败 {content}: {e}")

        # Also sweep both download directories for any stray files.
        try:
            for directory in ('images', 'videos'):
                if os.path.exists(directory):
                    for file in os.listdir(directory):
                        os.remove(os.path.join(directory, file))
        except Exception as e:
            print(f"清空目录失败: {e}")

        return jsonify({'code': 200, 'message': '删除成功'}), 200
    except Exception as e:
        print(f"删除资源错误: {e}")
        return jsonify({'code': 500, 'message': f'删除失败: {str(e)}'}), 500

if __name__ == '__main__':
    # Create the database schema before serving any requests
    init_db()
    # NOTE(review): debug=True enables the Werkzeug debugger/auto-reload —
    # presumably a development-only entry point; do not run this in production.
    app.run(host='localhost', port=5000, debug=True)