from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
import json
import urllib.request
from bs4 import BeautifulSoup
from data.models import Movie
# Create your views here.

def MovieView(request):
    """Render the main page listing every movie, 10 per page."""
    movies = Movie.objects.all().order_by('id')
    context = Paging(request, movies, 10)
    return render(request, 'main.html', context)

def InputGet(request):
    """AJAX endpoint: check whether any movie title segment contains the
    POSTed 'search1' text.

    Responds with JSON: ``{"result": bool}`` plus ``"search_text"`` echoing
    the query when a match was found.
    """
    ret = {'result': False}
    keyword = request.POST.get('search1', '')
    if keyword:
        for movie in Movie.objects.all():
            # A movie's name holds several alternative titles joined by '/'.
            if any(keyword in part for part in movie.name.split('/')):
                ret['search_text'] = keyword
                ret['result'] = True
                # One match is enough — the old code kept scanning the
                # whole table after the answer was already known.
                break
    return HttpResponse(json.dumps(ret), content_type='application/json')

def Result(request):
    """Render the search-results page: movies whose '/'-separated name
    segments contain the GET parameter 'search_text', paginated 10 per page.
    """
    # .get() avoids the KeyError the old code raised on the final
    # `request.GET['search_text']` access when the parameter was absent.
    search_text = request.GET.get('search_text', '')
    matches = []
    if search_text:
        for movie in Movie.objects.all():
            # A movie's name holds several alternative titles joined by '/'.
            # any() appends each movie at most once, so no dedup pass needed.
            if any(search_text in part for part in movie.name.split('/')):
                matches.append(movie)
    data = Paging(request, matches, 10)
    data['search_text'] = search_text
    return render(request, 'search.html', data)

def Paging(request, _list, num):
    """Paginate *_list* into pages of *num* items.

    Also collects ``name_list`` — every '/'-separated segment of every movie
    name in the database — presumably consumed by the templates (e.g. for
    search suggestions); TODO confirm against the templates.

    Returns a dict with keys 'page' (the current Page object), 'paginator',
    'dis_range' (up to 10 page numbers to display) and 'name_list'.
    """
    # All individual title segments across the whole Movie table.
    name_list = []
    for row in Movie.objects.values('id', 'name'):
        name_list.extend(row['name'].split('/'))

    paginator = Paginator(_list, num)
    # Requested page number, defaulting to the first page.
    page_num = request.GET.get('page', default='1')
    try:
        page = paginator.page(page_num)
        # page() succeeded, so page_num is a valid integer string.
        page_num = int(page_num)
    except PageNotAnInteger:
        # Non-integer page parameter -> fall back to the first page.
        page = paginator.page(1)
        page_num = 1
    except EmptyPage as e:
        # Page number outside the valid range: clamp to the nearest valid
        # page.  The old code left page_num at the out-of-range value, so
        # dis_range below was computed from a bogus page number.
        print('EmptyPage:{}'.format(e))
        page_num = paginator.num_pages if int(page_num) > paginator.num_pages else 1
        page = paginator.page(page_num)

    # Window of at most 10 page numbers centred on the current page.
    # max(1, ...) fixes the old code emitting negative/zero page numbers
    # whenever num_pages < 10 and the current page fell into the last branch.
    if page_num < 6:
        dis_range = range(1, min(paginator.num_pages, 10) + 1)
    elif page_num <= paginator.num_pages - 5:
        dis_range = range(page_num - 5, page_num + 5)
    else:
        dis_range = range(max(1, paginator.num_pages - 9), paginator.num_pages + 1)

    return {'page': page, 'paginator': paginator,
            'dis_range': dis_range, 'name_list': name_list}



#爬取电影数据
# def addhiatus(request):
#     data = Movie.objects.all()
#     headers = {
#             "Accept": "application/json, text/javascript, */*; q=0.01",
#             "X-Requested-With": "XMLHttpRequest",
#             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
#             "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
#         }
#     _list = []
#     for i in range(0, 10):
#         urlpath = "https://movie.douban.com/top250" + '?start=' + str(i * 25) + '&filter='
#         req = urllib.request.Request(urlpath, headers=headers)
#         # 开始请求，获取响应数据
#         resp = urllib.request.urlopen(req)
#         # 开始解析
#         bsoup = BeautifulSoup(resp, "html.parser")
#         psoup = bsoup.find("div", {"class": "grid-16-8 clearfix"}).find("ol", {"class": "grid_view"})
#         datalist = psoup.find_all("li")
#         for i in datalist:
#             dic = {}
#             dic['img_name'] = i.find("img", {"class": ""}).get("src").split("/")[-1]
#             dic['movie_url'] = i.find("div", {"class": "hd"}).find("a").get("href")
#             spanlist = []
#             for j in i.find("div", {"class": "star"}).find_all("span"):
#                 try:
#                     spanlist.append(j.get_text())
#                 except:
#                     print()
#             dic['score_num'] = spanlist[-1]
#             _list.append(dic)
#     j = 0
#     for i in data:
#         i.img_name = _list[j]['img_name']
#         i.movie_url = _list[j]['movie_url']
#         i.score_num = _list[j]['score_num']
#         i.save()
#         j+=1
#     return HttpResponse("<p>数据添加成功！</p>")

# def download(request):
#     fields = ['name', 'img_url']
#     data = Movie.objects.values(*fields)
#     for i in data:
#         try:
#             urllib.request.urlretrieve(i['img_url'], i['img_url'].split("/")[-1])
#         except:
#             print(i)
#     return HttpResponse("<p>数据添加成功！</p>")

# def addData(request):
#     # 设置请求体
#     headers = {
#         "Accept": "application/json, text/javascript, */*; q=0.01",
#         "X-Requested-With": "XMLHttpRequest",
#         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36",
#         "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
#     }
#     # 设置请求体
#     for i in range(0, 10):
#         urlpath = "https://movie.douban.com/top250" + '?start=' + str(i * 25) + '&filter='
#         req = urllib.request.Request(urlpath, headers=headers)
#         # 开始请求，获取响应数据
#         resp = urllib.request.urlopen(req)
#         # 开始解析
#         bsoup = BeautifulSoup(resp, "html.parser")
#         psoup = bsoup.find("div", {"class": "grid-16-8 clearfix"}).find("ol", {"class": "grid_view"})
#         datalist = psoup.find_all("li")
#         _list = []
#         for i in datalist:
#             dic = {}
#             dic['name'] = i.find("div", {"class": "hd"}).get_text().replace('\n', '').replace('\xa0', '').replace(' ', '').split('[')[0]
#             dic['actor'] = i.find("div", {"class": "bd"}).find("p", {"class": ""}).get_text().split("\n")[1].replace(' ', '').replace('\xa0', ' ')
#             dic['releasetime'] = i.find("div", {"class": "bd"}).find("p", {"class": ""}).get_text().split("\n")[2].replace('\xa0', ' ').lstrip()
#             dic['stars'] = i.find("span", {"class": "rating_num"}).get_text()
#             try:
#                 dic['describe'] = i.find("span", {"class": "inq"}).get_text()
#             except:
#                 print("-----------------------------------------------------------------")
#             dic['img_url'] = i.find("img", {"class": ""}).get("src")
#             try:
#                 Movie(name=dic['name'], actor=dic['actor'], releasetime=dic['releasetime'], stars=dic['stars'], describe=dic['describe'], img_url=dic['img_url']).save()
#             except:
#                 Movie(name=dic['name'], actor=dic['actor'], releasetime=dic['releasetime'], stars=dic['stars'], describe='', img_url=dic['img_url']).save()
#
#     return HttpResponse("<p>数据添加成功！</p>")