from django.shortcuts import render
from . import share
from .models import Tv, TvList, Movie, MovieList, Cartoon, CartoonList
import requests
import json
import re


def renew(request):
    """Render the renewal page; on POST, run the selected update job.

    GET renders the bare form; POST reads the ``renew`` field, dispatches
    to the matching updater, and renders the page with its status message.
    """
    if request.method == "GET":
        return render(request, "renew.html")
    # Map each module label to its updater; an unknown/missing value
    # falls through to the "please choose a module" message.
    dispatch = {
        '电视剧': tv_first,
        '电影': movie_first,
        '动漫': cartoon_first,
        '代理': renew_proxies,
    }
    action = dispatch.get(request.POST.get('renew'))
    message = action() if action else '请选择更新的模块！！！'
    return render(request, "renew.html", context={"result": message})


def tv_first():
    """Fetch the TV-series channel listing and upsert each show.

    Posts the channel-list API request (channel_id 100113), then for every
    card: creates a new ``Tv`` row (and scrapes its episodes) if unseen, or
    replaces the row and re-scrapes when ``time_long`` changed. Unchanged
    shows are skipped.

    Returns:
        str: status message shown on the renew page.
    """
    url = share.url
    headers = share.first_headers
    params = share.params
    proxy = share.proxy
    data = {
        "page_context": {
            "page_index": "0"
        },
        "page_params": {
            "page_id": "channel_list_second_page",
            "page_type": "operation",
            "channel_id": "100113",
            "filter_params": "sort=75",
            "page": "0"
        },
        "page_bypass_params": {
            "params": {
                "page_id": "channel_list_second_page",
                "page_type": "operation",
                "channel_id": "100113",
                "filter_params": "sort=75",
                "page": "0",
                "caller_id": "3000010",
                "platform_id": "2",
                "data_mode": "default",
                "user_mode": "default"
            },
            "scene": "operation",
            "abtest_bypass_id": "9ff841f2567550ec"
        }
    }
    # The API expects a compact JSON body (no whitespace).
    data = json.dumps(data, separators=(',', ':'))
    response = requests.post(url, headers=headers, params=params, proxies=proxy, data=data)

    data_lists = response.json()["data"]["CardList"][1]["children_list"]["list"]["cards"]
    for data_list in data_lists:
        info = data_list['params']
        cid = info['cid']
        new_pic_vt = info['new_pic_vt']
        title = info['title']
        second_title = info['second_title']
        time_long = info['timelong']

        # Fetch the row once instead of issuing three identical queries.
        try:
            existing = Tv.objects.get(cid__exact=cid)
        except Tv.DoesNotExist:
            existing = None

        if existing is not None and existing.time_long == time_long:
            continue  # already up to date
        if existing is not None:
            existing.delete()  # episode count changed: replace the row
        Tv.objects.create(
            cid=cid,
            new_pic_vt=new_pic_vt,
            title=title,
            second_title=second_title,
            time_long=time_long
        )
        tv_second(cid)
    return '电视剧已更新完毕'


def tv_second(cid):
    """Scrape every episode page of cover *cid* and store new episodes.

    Extracts the ``video_ids`` list from the cover page, fetches each
    episode page, and creates a ``TvList`` row for episodes not seen
    before. Trailer ('预告') and bonus ('彩蛋') clips are skipped.
    """
    url = "https://v.qq.com/x/cover/" + cid + ".html"
    headers = share.second_headers
    proxy = share.proxy
    response = requests.get(url, headers=headers, proxies=proxy)

    obj1 = re.compile(r'video_ids":\[(?P<vid_list>.*?)],"', re.S)
    obj2 = re.compile(r'"(?P<vid>.*?)"', re.S)
    obj3 = re.compile(r'<title>(?P<screen>.*?)</title>', re.S)

    result1 = obj1.finditer(response.text)
    for it1 in result1:
        vid_list = it1.group('vid_list')
        result2 = obj2.finditer(vid_list)
        for it2 in result2:
            vid = it2.group('vid')
            screen_url = 'https://v.qq.com/x/cover/' + cid + '/' + vid + '.html'
            response2 = requests.get(screen_url, headers=headers, proxies=proxy)
            response2.encoding = 'utf-8'
            result3 = obj3.search(response2.text)
            # Release the connection promptly (cartoon_second does the same).
            response2.close()
            if result3 is None:
                continue  # page without a <title>; previously crashed here
            screen = result3.group('screen')
            if '预告' not in screen and '彩蛋' not in screen:
                try:
                    TvList.objects.get(vid__exact=vid)
                except TvList.DoesNotExist:
                    # "<show>第3集_..." -> play_title "<show> 第3集", title "第3集"
                    play_title = screen.split('_')[0]
                    play_title = play_title.replace('第', ' 第')
                    title = play_title.split(' ')[-1]
                    TvList.objects.create(
                        cid=cid,
                        vid=vid,
                        href=screen_url,
                        title=title,
                        play_title=play_title
                    )


def movie_first():
    """Fetch the movie channel listing and store any movies not seen before.

    Posts the channel-list API request (channel_id 100173); every card whose
    cid is new gets a ``Movie`` row plus a play-page scrape via
    ``movie_second``.

    Returns:
        str: status message shown on the renew page.
    """
    payload = {
        "page_context": {
            "page_index": "0"
        },
        "page_params": {
            "page_id": "channel_list_second_page",
            "page_type": "operation",
            "channel_id": "100173",
            "filter_params": "sort=75",
            "page": "0"
        },
        "page_bypass_params": {
            "params": {
                "page_id": "channel_list_second_page",
                "page_type": "operation",
                "channel_id": "100173",
                "filter_params": "sort=75",
                "page": "0",
                "caller_id": "3000010",
                "platform_id": "2",
                "data_mode": "default",
                "user_mode": "default"
            },
            "scene": "operation",
            "abtest_bypass_id": "9ff841f2567550ec"
        }
    }
    body = json.dumps(payload, separators=(',', ':'))
    response = requests.post(
        share.url,
        headers=share.first_headers,
        params=share.params,
        proxies=share.proxy,
        data=body,
    )

    cards = response.json()["data"]["CardList"][1]["children_list"]["list"]["cards"]
    for card in cards:
        info = card['params']
        try:
            Movie.objects.get(cid__exact=info['cid'])
        except Movie.DoesNotExist:
            Movie.objects.create(
                cid=info['cid'],
                new_pic_vt=info['new_pic_vt'],
                title=info['title'],
                second_title=info['second_title'],
            )
            movie_second(info['cid'])
    return '电影已更新完毕'


def movie_second(cid):
    """Scrape the play pages of movie cover *cid* and store new entries.

    Extracts the ``video_ids`` list from the cover page, fetches each play
    page, and creates a ``MovieList`` row for vids not seen before.
    """
    url = "https://v.qq.com/x/cover/" + cid + ".html"
    headers = share.second_headers
    proxy = share.proxy
    response = requests.get(url, headers=headers, proxies=proxy)

    obj1 = re.compile(r'video_ids":\[(?P<vid_list>.*?)],"', re.S)
    obj2 = re.compile(r'"(?P<vid>.*?)"', re.S)
    obj3 = re.compile(r'<title>(?P<screen>.*?)</title>', re.S)

    result1 = obj1.finditer(response.text)
    for it1 in result1:
        vid_list = it1.group('vid_list')
        result2 = obj2.finditer(vid_list)
        for it2 in result2:
            vid = it2.group('vid')
            screen_url = 'https://v.qq.com/x/cover/' + cid + '/' + vid + '.html'
            response2 = requests.get(screen_url, headers=headers, proxies=proxy)
            response2.encoding = 'utf-8'
            result3 = obj3.search(response2.text)
            # Release the connection promptly (cartoon_second does the same).
            response2.close()
            if result3 is None:
                continue  # page without a <title>; previously crashed here
            screen = result3.group('screen')
            try:
                MovieList.objects.get(vid__exact=vid)
            except MovieList.DoesNotExist:
                # For movies the page title (before the first '_') is both
                # the display title and the play title.
                play_title = screen.split('_')[0]
                title = play_title
                MovieList.objects.create(
                    cid=cid,
                    vid=vid,
                    href=screen_url,
                    title=title,
                    play_title=play_title
                )


def cartoon_first():
    """Fetch the cartoon channel listing and upsert each show.

    Posts the channel-list API request (channel_id 100119), then for every
    card: creates a new ``Cartoon`` row and scrapes all episodes
    (``new_cartoon_second``) if unseen, or replaces the row and re-scrapes
    only recent episodes (``cartoon_second``) when ``time_long`` changed.

    Returns:
        str: status message shown on the renew page.
    """
    url = share.url
    headers = share.first_headers
    params = share.params
    proxy = share.proxy
    data = {
        "page_context": {
            "page_index": "0"
        },
        "page_params": {
            "page_id": "channel_list_second_page",
            "page_type": "operation",
            "channel_id": "100119",
            "filter_params": "sort=75",
            "page": "0"
        },
        "page_bypass_params": {
            "params": {
                "page_id": "channel_list_second_page",
                "page_type": "operation",
                "channel_id": "100119",
                "filter_params": "sort=75",
                "page": "0",
                "caller_id": "3000010",
                "platform_id": "2",
                "data_mode": "default",
                "user_mode": "default"
            },
            "scene": "operation",
            "abtest_bypass_id": "9ff841f2567550ec"
        }
    }
    # The API expects a compact JSON body (no whitespace).
    data = json.dumps(data, separators=(',', ':'))
    response = requests.post(url, headers=headers, params=params, proxies=proxy, data=data)

    data_lists = response.json()["data"]["CardList"][1]["children_list"]["list"]["cards"]
    for data_list in data_lists:
        info = data_list['params']
        cid = info['cid']
        new_pic_vt = info['new_pic_vt']
        title = info['title']
        second_title = info['second_title']
        time_long = info['timelong']

        # Fetch the row once instead of issuing three identical queries.
        try:
            existing = Cartoon.objects.get(cid__exact=cid)
        except Cartoon.DoesNotExist:
            existing = None

        if existing is None:
            Cartoon.objects.create(
                cid=cid,
                new_pic_vt=new_pic_vt,
                title=title,
                second_title=second_title,
                time_long=time_long
            )
            # Brand-new show: scrape the full episode list.
            new_cartoon_second(cid, time_long)
        elif existing.time_long != time_long:
            existing.delete()
            Cartoon.objects.create(
                cid=cid,
                new_pic_vt=new_pic_vt,
                title=title,
                second_title=second_title,
                time_long=time_long
            )
            # Updated show: scrape only the recent episodes.
            cartoon_second(cid, time_long)
    return '动漫已更新完毕'


def cartoon_second(cid, time_long):
    """Scrape episode pages of cartoon cover *cid* and store new episodes.

    For long-running shows (>= 50 episodes per *time_long*) only the newest
    20 vids are checked; otherwise every vid is checked. An episode is
    stored in ``CartoonList`` when its number does not exceed the total and
    any stale row with the same play title is replaced.

    Args:
        cid: cover id of the show.
        time_long: text containing the total episode count (e.g. "更新至52集").
    """
    url = "https://v.qq.com/x/cover/" + cid + ".html"
    headers = share.second_headers
    proxy = share.proxy
    response = requests.get(url, headers=headers, proxies=proxy)

    obj1 = re.compile(r'video_ids":\[(?P<vid_list>.*?)],"', re.S)
    obj2 = re.compile(r'"(?P<vid>.*?)"', re.S)
    obj3 = re.compile(r'<title>(?P<screen>.*?)</title>', re.S)

    # Loop-invariant: total episode count parsed from time_long.
    episode_total = re.search("[0-9]+", time_long)
    if episode_total is None:
        return  # no digits in time_long; previously crashed at int()
    total = int(episode_total.group())

    for it1 in obj1.finditer(response.text):
        vids = obj2.findall(it1.group('vid_list'))
        # Long shows: only the 20 newest episodes need re-checking.
        # (Previously this was two ~35-line duplicated branches.)
        if total >= 50:
            vids = vids[-20:]
        for vid in vids:
            screen_url = 'https://v.qq.com/x/cover/' + cid + '/' + vid + '.html'
            response2 = requests.get(screen_url, headers=headers, proxies=proxy)
            response2.encoding = 'utf-8'
            result3 = obj3.search(response2.text)
            response2.close()
            if result3 is None:
                continue  # page without a <title>; previously crashed here
            screen = result3.group('screen')
            try:
                CartoonList.objects.get(vid__exact=vid)
            except CartoonList.DoesNotExist:
                # "<show>第3集_..." -> play_title "<show> 第3集", title "第3集"
                play_title = screen.split('_')[0]
                play_title = play_title.replace('第', ' 第')
                title = play_title.split(' ')[-1]
                episode_no = re.search("[0-9]+", title)
                if episode_no is None:
                    continue  # no episode number; previously crashed at int()
                # Skip not-yet-released episode placeholders.
                if total >= int(episode_no.group()):
                    try:
                        # Replace any stale row sharing this play title.
                        CartoonList.objects.get(play_title__exact=play_title).delete()
                    except CartoonList.DoesNotExist:
                        pass
                    CartoonList.objects.create(
                        cid=cid,
                        vid=vid,
                        href=screen_url,
                        title=title,
                        play_title=play_title
                    )


def new_cartoon_second(cid, time_long):
    """Scrape ALL episode pages of a newly-added cartoon cover *cid*.

    Like ``cartoon_second`` but without the 20-episode cap: every vid in
    the cover page's ``video_ids`` list is checked and stored in
    ``CartoonList`` when its episode number does not exceed the total.

    Args:
        cid: cover id of the show.
        time_long: text containing the total episode count (e.g. "更新至52集").
    """
    url = "https://v.qq.com/x/cover/" + cid + ".html"
    headers = share.second_headers
    proxy = share.proxy
    response = requests.get(url, headers=headers, proxies=proxy)

    obj1 = re.compile(r'video_ids":\[(?P<vid_list>.*?)],"', re.S)
    obj2 = re.compile(r'"(?P<vid>.*?)"', re.S)
    obj3 = re.compile(r'<title>(?P<screen>.*?)</title>', re.S)

    # Hoisted out of the inner loop: time_long never changes per episode.
    episode_total = re.search("[0-9]+", time_long)
    if episode_total is None:
        return  # no digits in time_long; previously crashed at int()
    total = int(episode_total.group())

    for it1 in obj1.finditer(response.text):
        for vid in obj2.findall(it1.group('vid_list')):
            screen_url = 'https://v.qq.com/x/cover/' + cid + '/' + vid + '.html'
            response2 = requests.get(screen_url, headers=headers, proxies=proxy)
            response2.encoding = 'utf-8'
            result3 = obj3.search(response2.text)
            response2.close()
            if result3 is None:
                continue  # page without a <title>; previously crashed here
            screen = result3.group('screen')
            try:
                CartoonList.objects.get(vid__exact=vid)
            except CartoonList.DoesNotExist:
                # "<show>第3集_..." -> play_title "<show> 第3集", title "第3集"
                play_title = screen.split('_')[0]
                play_title = play_title.replace('第', ' 第')
                title = play_title.split(' ')[-1]
                episode_no = re.search("[0-9]+", title)
                if episode_no is None:
                    continue  # no episode number; previously crashed at int()
                # Skip not-yet-released episode placeholders.
                if total >= int(episode_no.group()):
                    try:
                        # Replace any stale row sharing this play title.
                        CartoonList.objects.get(play_title__exact=play_title).delete()
                    except CartoonList.DoesNotExist:
                        pass
                    CartoonList.objects.create(
                        cid=cid,
                        vid=vid,
                        href=screen_url,
                        title=title,
                        play_title=play_title
                    )


def renew_proxies():
    """Scrape free proxies from kuaidaili and rewrite the proxy-list module.

    Overwrites ``./app/renew/proxies_list.py`` with a ``proxy_list``
    containing every "ip:port" entry found on the page.

    Returns:
        str: status message shown on the renew page.
    """
    headers = {
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "sec-ch-ua": "\";Not A Brand\";v=\"99\", \"Chromium\";v=\"94\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Windows\"",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36 Core/1.94.209.400 QQBrowser/12.0.5441.400",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-User": "?1",
        "Sec-Fetch-Dest": "document",
        "Accept-Language": "zh-CN,zh;q=0.9"
    }
    url = "https://www.kuaidaili.com/free/"
    response = requests.get(url, headers=headers)

    obj = re.compile(r'<td data-title="IP">(?P<ip>.*?)</td>.*?<td data-title="PORT">(?P<port>.*?)</td>', re.S)
    # One "    'ip:port',\n" source line per table row.
    entries = [
        "    '" + it.group('ip') + ":" + it.group('port') + "',\n"
        for it in obj.finditer(response.text)
    ]
    # join() instead of repeated += (quadratic); the with-block closes the
    # file, so no explicit close() is needed.
    with open('./app/renew/proxies_list.py', 'w') as f:
        f.write("proxy_list = [\n" + "".join(entries) + "]\n")
    return '代理已更新完毕'
