import subprocess
import threading
from os import path, listdir
from pathlib import Path
from urllib.parse import urljoin

import requests
import urllib3
from django.shortcuts import render, redirect
from fake_useragent import UserAgent
from lxml import etree

# for InsecureRequestWarning
urllib3.disable_warnings()


'''
Purpose: main menu — shows links to the pages below (their names are hard to remember otherwise)
'''
range_a = 0

def home(request):
    if request.method == 'GET':
        url_list = [
            'get_static/',
            'post_pic/',
            'test_html',
            'post_html',            
        ]
        return render(request, 'home.html', {'url_list': url_list})


'''
Purpose: display the images stored under static
'''


def get_static(request):
    if request.method == 'GET':
        # 本py文件的路径
        print('__file__=',__file__)

        # 本py文件的所在目录的路径
        parentPath = path.dirname(__file__)
        print('parentPath=',parentPath)

        # 要找的路径
        staticPath = path.abspath(path.join(parentPath, '../static'))
        print('staticPath=', staticPath)

        imgs = []
        for img in listdir(staticPath):
            if img.endswith('.jpg'):
                imgs.append(int(img[:-4]))
        imgs.sort()
        print(imgs)
        return render(request, 'get_static.html', {'imgs': imgs})

'''
Purpose: delete the images under static
'''


def del_static():
    command1 = 'cd ./static && del /q * & type NUL > .keep'
    print('[command1]', command1)
    subprocess.run(command1, shell=True)


'''
Purpose: download an image from its URL and store it under static
Param: i is the unique id used to build the save path
'''


def download_pic(url, i):
    print('[pic-url]', url)
    save_path = 'static/' + str(i) + '.jpg'
    # print('[save_path]', save_path)
    try:
        # headers
        headers = {'User-Agent': UserAgent().random}
        # print(headers)
        response_file = requests.get(url=url, headers=headers, verify=False)

        # 判断状态码
        if response_file.status_code == 200:
            with open(save_path, 'wb') as fp:
                fp.write(response_file.content)
        else:
            print(url, '404')
            pass
    except Exception as e:
        print(url, e)
        pass


'''
Purpose: download a web page and fetch the image at the given index within it
'''


def down_html_inside_pic(html_url, index, i):
    print('[html_url]', html_url)
    try:
        # headers
        headers = {'User-Agent': UserAgent().random}
        # print(headers)
        response_file = requests.get(
            url=html_url, headers=headers, verify=False)

        # 判断状态码
        if response_file.status_code == 200:
            html = response_file.text
            elemt = etree.HTML(html)
            xpath = '//img[@src]'
            pic_path_list = elemt.xpath(xpath)
            pic_path = pic_path_list[index].get('src')
            if pic_path:
                download_pic(pic_path, i)
        else:
            print(html_url, '404')
            pass
    except Exception as e:
        print(html_url, e)
        pass


'''
Purpose: download a numbered range of images from an image-URL template
'''


def post_pic(request):
    if request.method == "GET":
        return render(request, 'post_pic.html')
    elif request.method == "POST":
        del_static()

        range_a = request.POST.get('a')
        range_b = request.POST.get('b')
        pic_url_base = request.POST.get('pic_url_base')
        print('[pic-url]', pic_url_base)

        if range_a is None or range_a == '':
            range_a = 2
        else:
            range_a = int(range_a)
        print('[a]', range_a)
        if range_b is None or range_b == '':
            range_b = range_a + 50
        else:
            range_b = int(range_b)
        print('[b]', range_b)

        url_list = []
        for i in range(range_a, range_b+1):
            pic_url = pic_url_base.format(i)
            print(pic_url)
            url_list.append(pic_url)

        threads = []
        for i, url in enumerate(url_list):
            # print(url,i)
            t = threading.Thread(target=download_pic, args=(url, range_a + i))
            threads.append(t)

        for t in threads:
            t.setDaemon(True)
            t.start()

        for t in threads:
            t.join()

        return redirect('/get_static')


'''
Purpose: given a page URL, download the image at a given position (or all images)
'''


def test_html(request):
    if request.method == "GET":
        return render(request, 'test_html.html')
    elif request.method == "POST":
        del_static()

        html_url = request.POST.get('html_url')
        print('[html_url]', html_url)

        index = request.POST.get('index')
        if index is None or index == '':
            index = -1
        else:
            index = int(index)
        print('[index]', index)

        # headers
        headers = {'User-Agent': UserAgent().random}
        # print(headers)
        response_file = requests.get(
            url=html_url, headers=headers, verify=False
        )

        # 判断状态码
        if response_file.status_code == 200:
            html = response_file.text
            # print(html)
            elemt = etree.HTML(html)
            xpath = '//img[@src]'
            pic_path_list = elemt.xpath(xpath)
            if index == -1:
                for i in range(len(pic_path_list)):
                    pic_path = pic_path_list[i].get('src')
                    download_pic(pic_path, i)
            else:
                pic_path = pic_path_list[index].get('src')
                download_pic(pic_path, 0)
        else:
            print(html_url, '404')
            pass
        return redirect('/get_static')


'''
Purpose: for each page in a numbered page-URL range, download one image from it
'''


def post_html(request):
    if request.method == "GET":
        return render(request, 'post_html.html')
    elif request.method == "POST":
        del_static()

        range_a = request.POST.get('a')
        range_b = request.POST.get('b')
        html_url = request.POST.get('html_url')
        index = request.POST.get('index')

        if range_a is None or range_a == '':
            range_a = 2
        else:
            range_a = int(range_a)
        if range_b is None or range_b == '':
            range_b = range_a + 50
        else:
            range_b = int(range_b)
        if index is None or index == '':
            index = -1
        else:
            index = int(index)

        url_list = []
        for i in range(range_a, range_b+1):
            url2 = html_url.format(i)
            # print(url2)
            url_list.append(url2)

        threads = []
        for i, url in enumerate(url_list):
            # print(url,i)
            t = threading.Thread(
                target=down_html_inside_pic, args=(url, index, range_a + i))
            threads.append(t)

        for t in threads:
            t.setDaemon(True)
            t.start()

        for t in threads:
            t.join()

        return redirect('/get_static')
