# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect # type: ignore
from django.http import HttpResponse,FileResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from urllib.parse import urlparse
import validators
import requests
import shlex
import logging
import jieba.analyse
import re,os
import zipfile
from django.conf import settings
from .models import *
import shutil
import pyzipper
import jieba
from django.http import Http404
import uuid
import subprocess
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import requests

logger = logging.getLogger(__name__)

def change_person_info(request):
    """Update the logged-in user's display name (first_name) from a POST form.

    Always redirects back to the referring page (falling back to '/'),
    reporting the outcome through the Django messages framework.
    """
    referer_url = request.META.get('HTTP_REFERER', '/')
    if request.method != 'POST':
        messages.error(request, '修改失败')
        return redirect(referer_url)
    # Bug fix: the original returned None (HTTP 500) for an unauthenticated
    # POST because only the authenticated branch had a return path.
    if not request.user.is_authenticated:
        messages.warning(request, '请先登录')
        return redirect(referer_url)
    first_name = request.POST.get('first_name', None)
    if not first_name:
        messages.warning(request, '昵称不能为空')
        return redirect(referer_url)
    request.user.first_name = first_name
    request.user.save()
    messages.success(request, '修改成功')
    return redirect(referer_url)


def clear_server(request):
    """Remove orphaned site folders under STATIC_NGINX_URL (superuser, GET).

    A folder is orphaned when no StaticFile record's path_name maps to it.
    Reports the number of removed folders and redirects to '/'.
    """
    if request.method != 'GET':
        # Bug fix: the message previously read '方法允许' (missing 不).
        messages.error(request, '方法不允许')
        return redirect('/')
    if not request.user.is_authenticated:
        messages.error(request, '请先登录')
        return redirect('/')
    if not request.user.is_superuser:
        messages.error(request, '权限不足')
        return redirect('/')
    # path_name values are stored like "/abc123def"; strip slashes to match
    # the on-disk folder names.
    known_names = {item.path_name.replace("/", "") for item in StaticFile.objects.all()}
    nginx_dir = os.path.join(settings.STATIC_NGINX_URL)
    removed = 0
    for entry in os.listdir(nginx_dir):  # renamed: `dir` shadowed the builtin
        entry_path = os.path.join(nginx_dir, entry)
        if entry not in known_names and os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
            removed += 1
    messages.success(request, f'清理成功，成功删除{removed}个文件夹')
    return redirect('/')

def edit_static_file(request):
    """Update a StaticFile's display name and source URL from POST data.

    Requires POST fields: id, file_name, source_url. Always redirects to
    '/' with a message describing the outcome.
    """
    if request.method != 'POST':
        messages.warning(request, '请求方式错误')
        return redirect('/')
    id = request.POST.get('id', None)
    if not id:
        messages.warning(request, "ID不能为空")
        return redirect('/')
    file_name = request.POST.get('file_name', None)
    if not file_name:
        messages.warning(request, "文件名称不能为空")
        return redirect('/')
    source_url = request.POST.get('source_url', None)
    if not source_url:
        messages.warning(request, "文件地址不能为空")
        return redirect('/')
    try:
        ins = StaticFile.objects.get(id=id)
        ins.file_name = file_name
        ins.source_url = source_url
        ins.save()
        messages.success(request, "修改成功")
    except Exception:
        # Bug fix: the message used JS-template style ${id}, which renders
        # a literal "$" in Python f-strings.
        messages.warning(request, f"未找到id为{id}的记录")
    return redirect('/')
   

def delete_static_file(request, id):
    """Delete the StaticFile with the given primary key, then redirect to '/'.

    NOTE(review): this removes only the DB record; the extracted folder on
    disk is left for clear_server() to reap — confirm that is intentional.
    """
    try:
        StaticFile.objects.get(id=id).delete()
        messages.success(request, '删除成功')
    except Exception as e:
        # Log the cause instead of silently discarding it (the original
        # bound `e` and never used it).
        logger.error("删除StaticFile(id=%s)失败: %s", id, e)
        messages.error(request, '删除失败')
    return redirect('/')

def serve_static_page(request, path):
    """Serve the index.html of a hosted static site identified by *path*."""
    folder = path.replace("/", "")
    index_path = os.path.join(settings.STATIC_NGINX_URL, folder, 'index.html')
    if not os.path.exists(index_path):
        raise Http404("Page not found")
    with open(index_path, 'r', encoding='utf-8') as fh:
        return HttpResponse(fh.read())


def serve_static_file(request, path, filename):
    """Stream an asset from a hosted site's static/ folder.

    Raises Http404 when the asset does not exist on disk.
    """
    folder = path.replace("/", "")
    file_path = os.path.join(settings.STATIC_NGINX_URL, folder, 'static', filename)
    file_path = file_path.rstrip('/')
    if not os.path.exists(file_path):
        # Improvement: use the module logger instead of a bare print().
        logger.warning("File not found: %s", file_path)
        raise Http404("File not found")
    # FileResponse takes ownership of the handle and closes it when the
    # response has been consumed.
    return FileResponse(open(file_path, 'rb'))

def search(request):
    """Search the user's static files by indexed keyword or file name (POST).

    Renders index.html with the matching StaticFile rows, each annotated
    with a transient full_url attribute when server info is configured.
    """
    if request.method == "GET":
        return redirect('index')
    if not request.user.is_authenticated:
        messages.warning(request, '请先登录！')
        return redirect(request.META.get('HTTP_REFERER', '/'))
    search_key = request.POST.get('search', None)
    if not search_key:
        return redirect(request.META.get('HTTP_REFERER', '/'))
    keyword_qs = KeyWord.objects.filter(keywords__icontains=search_key)
    server_info = ServerInfo.objects.filter(created_by=request.user).first()
    results = StaticFile.objects.filter(id__in=keyword_qs.values('static_file_id'))
    by_name = StaticFile.objects.filter(file_name__icontains=search_key)
    results = results.union(by_name)
    # Bug fix: the original dereferenced server_info unconditionally and
    # crashed with AttributeError when the user had no ServerInfo row;
    # guard it exactly as index() does.
    if server_info:
        agreement = "https" if server_info.select_type == "https" else "http"
        for item in results:
            if server_info.port:
                item.full_url = f"{agreement}://" + server_info.ip + ":" + server_info.port + item.path_name
            else:
                item.full_url = f"{agreement}://" + server_info.ip + item.path_name
    return render(request, 'index.html', {'instances': results, "search_value": search_key})


def change_password(request):
    """Change the logged-in user's password after verifying the old one.

    All validation failures redirect to the index with a warning; success
    rotates the password hash and redirects to the login page.
    """
    if request.method != "POST":
        # Bug fix: HTTP_REFERER may be absent, and redirect(None) raises;
        # fall back to the site root.
        messages.warning(request, "方法不允许")
        return redirect(request.META.get('HTTP_REFERER') or '/')

    old_password = request.POST.get("old_password", None)
    if not old_password:
        messages.warning(request, "请输入旧密码")
        return redirect("index")
    new_password = request.POST.get("new_password", None)
    if not new_password:
        messages.warning(request, "请输入新密码")
        return redirect("index")
    new_password_confirm = request.POST.get("new_password_confirm", None)
    if not new_password_confirm:
        messages.warning(request, "请输入新密码确认")
        return redirect("index")

    # Guard-clause form of the original nested if/else pyramid.
    if not request.user.check_password(old_password):
        messages.warning(request, "旧密码错误")
        return redirect("index")
    if old_password == new_password:
        messages.warning(request, "新密码不能与旧密码相同")
        return redirect("index")
    if new_password != new_password_confirm:
        messages.warning(request, "新密码和确认密码不一致")
        return redirect("index")

    request.user.set_password(new_password)
    request.user.save()
    messages.success(request, "修改成功，请重新登录")
    return redirect("login")
    
def user_logout(request):
    """Terminate the current session and send the user to the login page."""
    logout(request)
    messages.success(request, "退出成功")
    return redirect('login')

def server_info(request):
    """Display and upsert the current user's ServerInfo record.

    POST updates the record when one exists, otherwise creates it; GET
    simply renders the existing record (or None).
    """
    if request.method == "POST":
        server_name = request.POST.get("server_name", None)
        ip = request.POST.get("ip", None)
        port = request.POST.get("port", None)
        desc = request.POST.get("desc", "")
        select_type = request.POST.get("select_type", "")

        # Improvement: filter().first() replaces the old get()-based code
        # that used a broad `except` (catching *any* error, not just
        # DoesNotExist) as create-or-update control flow.
        instance = ServerInfo.objects.filter(created_by=request.user).first()
        if instance:
            instance.select_type = select_type
            instance.server_name = server_name
            instance.ip = ip
            instance.port = port
            instance.desc = desc
            instance.save()
            messages.success(request, "更新成功")
        else:
            instance = ServerInfo.objects.create(
                select_type=select_type,
                server_name=server_name,
                ip=ip,
                port=port,
                desc=desc,
                created_by=request.user,
            )
            messages.success(request, "创建成功")
        return render(request, 'server_info.html', {"instance": instance})

    instance = ServerInfo.objects.filter(created_by=request.user).first()
    return render(request, 'server_info.html', {"instance": instance})



def index(request):
    """Home page: the user's static files, newest first, with public URLs.

    When the user has configured a ServerInfo record, each StaticFile is
    annotated with a transient `full_url` built from scheme, host, optional
    port and the file's mount path.
    """
    instances = StaticFile.objects.filter(created_by=request.user).order_by("-created_at")
    server_info = ServerInfo.objects.filter(created_by=request.user).first()
    if server_info:
        scheme = "https" if server_info.select_type == "https" else "http"
        host = server_info.ip
        if server_info.port:
            host = f"{host}:{server_info.port}"
        for item in instances:
            item.full_url = f"{scheme}://{host}{item.path_name}"
    return render(request, 'index.html', {"instances": instances, "server_info": server_info})

def login_view(request):
    """Authenticate and log in a user from POSTed credentials.

    Renders the login form on GET or failed login; redirects to the index
    on success.
    """
    if request.method == 'POST':
        # Bug fix: request.POST['...'] raised MultiValueDictKeyError when a
        # field was missing from the form; .get() degrades to a failed login.
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = authenticate(username=username, password=password)

        if user is not None:
            login(request, user)
            messages.success(request, '登录成功！')
            return redirect('index')
        messages.warning(request, '登录失败，请检查用户名和密码！')

    return render(request, 'login.html')

def is_valid_url(url):
    # Validate a URL with the `validators` package.
    # NOTE(review): this definition is shadowed by a later `is_valid_url`
    # (urlparse-based) further down in this module, so this version is dead
    # at import time — consider removing one of the two.
    return validators.url(url)
def is_valid_string(input_str):
    """Return True when *input_str* contains only digits, ASCII letters or '_'."""
    return re.match(r'^[0-9a-zA-Z_]+$', input_str) is not None

def check_url_accessible(url):
    """Return True when a HEAD request to *url* answers with HTTP 200.

    Any request error (timeout, DNS failure, connection refused) counts as
    inaccessible rather than raising.
    """
    try:
        return requests.head(url, timeout=5).status_code == 200
    except requests.RequestException:
        return False

def split_file_name(file_name):
    """Split *file_name* into (stem, extension) on the last dot.

    The extension is returned without the dot; a name with no dot yields
    an empty extension. A leading-dot name like ".bashrc" is treated as an
    empty stem with extension "bashrc" (matching the original regex).
    """
    stem, dot, ext = file_name.rpartition('.')
    if not dot:
        return file_name, ""
    return stem, ext

# def extract_zip_without_top_level(zip_path, target_dir):
#     with zipfile.ZipFile(zip_path, 'r') as zip_ref:
#         zip_ref.extractall(target_dir)


def extract_zip_without_top_level(zip_path, target_dir):
    """Extract a (possibly AES-encrypted) zip into *target_dir*.

    Member names written without the UTF-8 flag come back decoded as cp437;
    re-encode each name and decode it as UTF-8 first, falling back to GBK
    so Chinese file names survive extraction.
    """
    with pyzipper.AESZipFile(zip_path, 'r') as archive:
        for member in archive.infolist():
            try:
                member.filename = member.filename.encode('cp437').decode('utf-8')
            except (UnicodeDecodeError, UnicodeEncodeError):
                member.filename = member.filename.encode('cp437').decode('gbk', errors='ignore')
            archive.extract(member, target_dir)


def move_directory_contents_up(source_dir):
    """Move everything inside *source_dir* into its parent, then delete it.

    Bug fix: the original also computed the *grandparent* directory and ran
    makedirs() on it, although every move targets the parent — that dead
    (and misleading) code is removed.
    """
    parent_dir = os.path.dirname(source_dir)
    os.makedirs(parent_dir, exist_ok=True)

    for item in os.listdir(source_dir):
        shutil.move(os.path.join(source_dir, item), os.path.join(parent_dir, item))

    # Remove the now-empty wrapper directory.
    shutil.rmtree(source_dir)
    # print(f"Moved contents of {source_dir} to {parent_dir} and deleted  {source_dir} ")

def find_first_html_file(target_dir):
    """Walk *target_dir* for the first .html file.

    Returns (file_name, containing_dir, depth) where depth is the deepest
    directory level seen up to and including the directory where the file
    was found (0 = target_dir itself). Returns (None, None, depth) when no
    HTML file exists.
    """
    found_name = None
    found_dir = None
    max_depth = 0
    for root, _dirs, files in os.walk(target_dir):
        depth = root[len(target_dir):].count(os.sep)
        max_depth = max(max_depth, depth)
        html = next((name for name in files if name.endswith(".html")), None)
        if html is not None:
            found_name, found_dir = html, root
            break
    return found_name, found_dir, max_depth

def replace_str(request, static_file_id, old_str, new_str, file_path):
    """Rewrite *file_path*, replacing old_str with new_str per line, and
    persist the deduplicated jieba token set of the result as a KeyWord row.

    The replacement is skipped (file rewritten unchanged) when either
    old_str or new_str is falsy.
    """
    with open(file_path, 'r', encoding='utf-8') as fh:
        original_lines = fh.readlines()

    collected_words = []
    with open(file_path, 'w', encoding='utf-8') as fh:
        for raw_line in original_lines:
            updated = raw_line.replace(old_str, new_str) if (old_str and new_str) else raw_line
            fh.write(updated)
            # Tokenise the (possibly rewritten) line for the search index.
            collected_words.extend(jieba.lcut(updated))

    # Deduplicate before persisting; order is not significant for search.
    unique_words = list(set(collected_words))
    KeyWord.objects.create(content="", keywords=",".join(unique_words),
                           static_file_id=static_file_id, created_by=request.user)

def handle_upload_file(request, path, file, _name,_source_url):
    """Unpack an uploaded zip of a static site into STATIC_NGINX_URL/<path>.

    Expected archive layout: at most one wrapping directory containing an
    HTML entry page and an asset directory. The HTML file is renamed to
    index.html, the asset directory to "static", and references to the old
    directory name inside index.html are rewritten (see replace_str).

    Returns:
        bool: True when the upload was processed and a StaticFile record
        created; False on any validation failure or error (the reason is
        reported via the messages framework).
    """
    # Refuse duplicate mount paths.
    filter_instances = StaticFile.objects.filter(path_name=path)
    if len(filter_instances) != 0:
        messages.warning(request, f"{path} 路径已存在")
        return False
    user = request.user
    try:
        if request.method == 'POST' and request.FILES.get('file'):
            # NOTE(review): rebinds the `file` parameter to the uploaded
            # file object taken from request.FILES.
            file = request.FILES['file']
            file_name = file.name
            
            name,ext = split_file_name(file_name)
            
            # Validate the file type: only .zip archives are accepted.
            if ext.lower() != "zip":
                messages.warning(request, f"{ext} 文件类型不正确")
                return False
            
            logger.info(f"处理文件: {file_name}")
            _path = path.replace("/","")
            # Unpack the archive to obtain the static resources.
            target_dir = os.path.join(settings.STATIC_NGINX_URL,_path)
            os.makedirs(target_dir, exist_ok=True)
            print("target_dir",target_dir)
            
            try:
                # Extract the ZIP into the target directory.
                extract_zip_without_top_level(file, target_dir)
                # Find the first .html file and how deep it sits.
                html_file_name, html_file_path, level = find_first_html_file(target_dir)
                if level > 1:
                    messages.warning(request, f"{path} 压缩文件夹层级不正确")
                    shutil.rmtree(target_dir)
                    return False
                if html_file_name and level == 1:
                    # Flatten a single wrapping directory.
                    move_directory_contents_up(html_file_path)
                if html_file_name is None:
                    messages.warning(request, f"{path} 路径下没有找到html文件")
                    return False
                html_name,html_ext = split_file_name(html_file_name)
                print(os.listdir(target_dir),"os.listdir(target_dir)")
                old_html_file_name = None
                # NOTE(review): the loop variable below shadows the `file`
                # parameter/upload object; here it is a directory entry name.
                for file in os.listdir(target_dir):
                    temp_path = os.path.join(target_dir,file)

                    if file == "__MACOSX":
                        # macOS metadata folder found in Finder-created zips.
                        shutil.rmtree(temp_path)
                        continue
                    if os.path.isfile(temp_path):
                        # NOTE(review): assumes a single top-level HTML file;
                        # several files would each overwrite index.html.
                        os.rename(temp_path,os.path.join(target_dir,"index.html"))
                    elif os.path.isdir(temp_path):
                        print(temp_path,"temp_path")
                        old_html_file_name = file
                        os.rename(temp_path,os.path.join(target_dir,"static"))
                # old_html_file_name,_ = split_file_name(old_html_file_name)
                print(old_html_file_name,"old_html_file_name")
                print(os.path.join(target_dir,"index.html"),"index.html")
                ins = StaticFile.objects.create(
                    path_name = path,
                    file_name = _name,
                    source_url = _source_url,
                    created_by = user

                )
                # Rewrite old asset-dir references to "static" and build the
                # keyword index for search.
                replace_str(request,ins.id,old_html_file_name,'static',os.path.join(target_dir,"index.html"))
                # messages.success(request, "添加成功")
                return True
            
            except Exception as e:
                messages.warning(request, f"zip解压缩失败: {str(e)}")
                return False
        
        else:
            messages.warning(request, f"未找到上传的文件")
            return False
    
    except Exception as e:
        messages.error(request, f"处理文件时发生错误: {str(e)}")
        logger.error(f"处理文件时发生错误: {str(e)}")
        return False
# Create your views here.
def add(request):
    """Create a hosted resource from an uploaded zip ("file") or a URL ("url").

    POST fields: type, name, optional url (accessibility pre-check),
    source_url, and an uploaded `file` for the zip flow. Each resource is
    mounted under a random 9-hex-character path.

    Cleanup: removed code after the exhaustive if/elif/else (unreachable),
    the contradictory `_url is None` check inside `if _url:`, the debug
    print of the generated path, and the `_souerce_url` typo.
    """
    if request.method != "POST":
        return render(request, 'add.html')

    _type = request.POST.get("type", None)
    _url = request.POST.get("url", None)
    _file = request.FILES.get('file')
    _source_url = request.POST.get("source_url", None)
    # Mount paths are generated server-side, never taken from user input.
    _path = "/" + str(uuid.uuid4().hex)[0:9]
    _name = request.POST.get("name", None)
    if _name is None:
        messages.warning(request, "请输入资源名称")
        return render(request, 'add.html')
    if _type is None:
        messages.warning(request, "缺少类型")
        return render(request, 'add.html')
    if _type == 'file' and _file is None:
        messages.warning(request, "缺少文件")
        return render(request, 'add.html')
    if _url:
        if not is_valid_url(_url):
            messages.warning(request, "url格式不正确")
            return render(request, 'add.html')
        if not check_url_accessible(_url):
            messages.warning(request, f"{_url} 无法正常访问")
            return render(request, 'add.html')

    if _type == "file":
        flag = handle_upload_file(request=request, path=_path, file=_file,
                                  _name=_name, _source_url=_source_url)
        if flag:
            messages.success(request, f"{_name} 添加成功")
            return redirect("index")
        return render(request, 'add.html')
    if _type == "url":
        if not is_valid_url(_source_url):
            messages.warning(request, "url不合法")
            return redirect("add")
        # handle_url reports its own success/failure message.
        handle_url(request=request, url=_source_url, path=_path, _name=_name)
        return render(request, 'add.html')

    messages.warning(request, "类型错误")
    return render(request, 'add.html')

# 校验给的url是不是一个合格的url
def is_valid_url(url):
    """Check whether *url* is a well-formed absolute URL.

    A URL counts as valid when urlparse finds both a scheme (http, https,
    ...) and a network location (host).

    Args:
        url (str): URL to validate.

    Returns:
        bool: True when valid, otherwise False.
    """
    parts = urlparse(url)
    return bool(parts.scheme and parts.netloc)

# def download_website(request,url, local_directory):
#     """
#     使用 wget 下载整个网站到指定的本地目录。

#     参数:
#     url (str): 需要下载的网站 URL。
#     local_directory (str): 保存下载文件的本地目录。

#     返回:
#     None
#     """
#     command = f"wget -p -k -E -nd -P {local_directory} {url}"
#     try:
#         # 使用 subprocess.run() 执行命令
#         result = subprocess.run(command, shell=True, check=True, text=True,
#                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#         print("Command Output:")
#         print(result.stdout)
#         return True
#     except subprocess.CalledProcessError as e:
#         print(f"Error executing command: {e}")
#         print(f"Command returned non-zero exit status: {e.returncode}")
#         print(f"Command output: {e.output}")
#         messages.warning(request, f"Error executing command: {e}")
#         messages.warning(request, f"Command returned non-zero exit status: {e.returncode}")
#         messages.warning(request, f"Command output: {e.output}")
#         return False

def download_website(request, url, local_directory):
    """Mirror *url* (page plus static assets) into *local_directory* via wget.

    Args:
        request: current request, used to surface errors as messages.
        url (str): site URL to download.
        local_directory (str): destination directory for the mirrored files.

    Returns:
        bool: True on success, False on failure.
    """
    # Improvement: pass an argument list with shell=False instead of a
    # quoted shell string — no shell is involved, so no quoting/injection
    # concerns with user-influenced values.
    command = [
        "wget", "-p", "-k", "-E", "-nd", "-H",
        "--span-hosts", "--convert-links",
        "-P", local_directory, url,
    ]

    try:
        result = subprocess.run(command, check=True, text=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        logger.info("wget output: %s", result.stdout)
        return True
    except subprocess.CalledProcessError as e:
        error_msg = f"Error executing command: {e}\nCommand output: {e.output}\nError details: {e.stderr}"
        logger.error(error_msg)
        messages.warning(request, error_msg)
        return False
    except OSError as e:
        # Without a shell, a missing wget binary raises here instead of
        # producing a non-zero shell exit status.
        error_msg = f"Error executing command: {e}"
        logger.error(error_msg)
        messages.warning(request, error_msg)
        return False

def get_browser_tab_title(url):
    """Fetch *url* and return the text of its <title> tag.

    Returns "No title found" when the page has no <title>, and "未知名称"
    when the response status is not 200.
    """
    # Bug fix: the request previously had no timeout and could hang the
    # worker indefinitely on an unresponsive host.
    response = requests.get(url, timeout=10)

    if response.status_code != 200:
        return "未知名称"

    soup = BeautifulSoup(response.text, 'html.parser')
    return soup.title.string if soup.title else "No title found"



def extract_keywords(request, static_file_id, file_path):
    """Run jieba TF-IDF keyword extraction over *file_path* and persist the
    deduplicated keyword set as a KeyWord row for the given static file."""
    with open(file_path, 'r', encoding='utf-8') as fh:
        text = fh.read()

    # topK=None keeps every extracted tag rather than a fixed top-k slice.
    tags = jieba.analyse.extract_tags(text, topK=None, withWeight=False)
    deduped = list(set(tags))

    KeyWord.objects.create(
        content="",
        keywords=",".join(deduped),
        static_file_id=static_file_id,
        created_by=request.user,
    )

# 下载网页中所有的资源文件
# Download one referenced resource (img/script/stylesheet) of a page.
def download_resource(url,resource_dir,tag, attribute):
    """Download the asset referenced by *tag*'s *attribute* into *resource_dir*.

    Also rewrites the tag's attribute to the local path, and for CSS files
    additionally downloads every url(...) reference found inside and rewrites
    those references to bare file names. Download failures are printed and
    otherwise ignored (best-effort mirroring).

    NOTE(review): the local file name is the basename of the source path, so
    two assets with the same basename in different directories collide.
    """
    src = tag.get(attribute)
    if src:
        # Resolve a relative reference against the page URL.
        resource_url = urljoin(url, src)
        resource_name = os.path.join(resource_dir, os.path.basename(src))
        print(resource_name,"resource_name")
        print(resource_url,"resource_url")
        if not os.path.exists(resource_name):
            os.makedirs(os.path.dirname(resource_name), exist_ok=True)
        try:
            # Fetch and write the asset bytes.
            resource_response = requests.get(resource_url)
            with open(resource_name, 'wb') as resource_file:
                resource_file.write(resource_response.content)
            print(f'已下载: {resource_name}')

            # Point the HTML tag at the local copy.
            tag[attribute] = os.path.join(resource_dir, os.path.basename(src))
            url_pattern = re.compile(r'url\((.*?)\)')
            
            # For CSS files, chase the url(...) references they contain.
            if resource_name.endswith('.css'):
                with open(resource_name, 'r', encoding='utf-8') as css_file:
                    css_content = css_file.read()

                urls = url_pattern.findall(css_content)
                for css_url in urls:
                    css_url = css_url.strip('\'"')
                    # Resolve relative to the CSS file's own URL.
                    css_resource_url = urljoin(resource_url, css_url)
                    css_resource_name = os.path.join(resource_dir, os.path.basename(css_url))

                    try:
                        css_resource_response = requests.get(css_resource_url)
                        with open(css_resource_name, 'wb') as css_resource_file:
                            css_resource_file.write(css_resource_response.content)
                        print(f'已下载: {css_resource_name}')

                        # Rewrite the reference inside the CSS text.
                        css_content = css_content.replace(css_url, os.path.basename(css_url))
                    except requests.exceptions.RequestException as e:
                        print(f'无法下载 {css_resource_url}: {e}')

                # Persist the rewritten CSS.
                with open(resource_name, 'w', encoding='utf-8') as css_file:
                    css_file.write(css_content)
                
        except requests.exceptions.RequestException as e:
            print(f'无法下载 {resource_url}: {e}')



def replace_static_urls(html_content):
    """Rewrite link/script/img href/src URLs to flat ./<filename> references.

    Bug fix: the original always closed the rewritten attribute with a
    double quote, so single-quoted attributes came back with mismatched
    quotes (src='x" ). The closing quote now matches the opening one via a
    backreference.
    """
    # Group 1: everything up to (not including) the opening quote.
    # Group 3: the quote character itself; \3 requires the same closer.
    # Group 4: the URL value.
    pattern = re.compile(
        r'(<(link|script|img)[^>]+(?:href|src)=)(["\'])([^"\']+)\3',
        re.IGNORECASE,
    )

    def replace_match(match):
        prefix, _tag, quote, url = match.groups()
        file_name = url.split('/')[-1]
        return f'{prefix}{quote}./{file_name}{quote}'

    return pattern.sub(replace_match, html_content)
def handle_request_download_url(url,save_dir):
    """Fetch *url*, save its HTML as save_dir/index.html, and download the
    assets referenced by its img/script/stylesheet tags via download_resource.

    Always returns True; individual asset failures are handled (printed)
    inside download_resource.
    """
    print("url",url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
    }
    # NOTE(review): verify=False disables TLS certificate validation —
    # confirm this is an accepted risk for the target sites.
    response = requests.get(url,headers=headers, verify=False)
    print("response.text",response.content)
    print("response.status_code",response.status_code)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Save the raw page HTML.
    html_path = os.path.join(save_dir,'index.html')
    with open(html_path, 'w', encoding='utf-8') as file:
        # contents = replace_static_urls(response.text)
        contents = response.text
        file.write(contents)
    
    # Walk the resource-bearing tags and mirror each referenced asset.
    for tag in soup.find_all(['img', 'script', 'link']):
        if tag.name == 'link' and tag.get('rel') == ['stylesheet']:
            download_resource(url,save_dir,tag, 'href')
        elif tag.name == 'script' and tag.get('src'):
            download_resource(url,save_dir,tag, 'src')
        elif tag.name == 'img' and tag.get('src'):
            download_resource(url,save_dir,tag, 'src')
    return True
    

def handle_url(request, url, path, _name):
    """Mirror *url* into STATIC_NGINX_URL/<path> and index its HTML keywords.

    Returns:
        bool: True on success, False on failure. (Improvement: the original
        returned None even though its caller assigns the result to a flag.)
    """
    _path = path.replace("/", "")
    full_path = os.path.join(settings.STATIC_NGINX_URL, _path)
    os.makedirs(full_path, exist_ok=True)

    flag = download_website(request, url, full_path)
    if not flag:
        # Bug fix: failure was previously reported with messages.success().
        messages.error(request, "下载失败")
        return False

    ins = StaticFile.objects.create(
        file_name=_name,
        path_name=path,
        source_url=url,
        created_by=request.user,
        entry_name="",
    )
    # Index every downloaded HTML file for keyword search.
    for item in os.listdir(full_path):
        if item.endswith(".html"):
            extract_keywords(request, ins.id, os.path.join(full_path, item))
    messages.success(request, "下载成功")
    return True