from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
import json
from bs4 import BeautifulSoup
import requests
import re
import math

# Configuration: request headers, OPAC root URL, and proxy settings.
def setting():
    """Return (headers, rootUrl, proxies) used for all catalog requests.

    The original header names and values contained stray spaces around
    hyphens and commas ("Accept - Encoding", "zh - CN, zh;q = 0.9"),
    which produce malformed HTTP headers; they are normalized here.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    }

    # OPAC search endpoint (intranet address).
    rootUrl = "http://192.168.200.247/opac/openlink.php?"

    # Outbound HTTP proxy with basic-auth credentials.
    # NOTE(review): credentials are hard-coded — consider moving to settings/env.
    proxies = {
        "http": "http://qf:qf3.1415@221.232.137.211:10086/"
    }

    return headers, rootUrl, proxies

# Join (key, value) pairs into a "k1=v1&k2=v2" query-string fragment.
def joinStrC(items):
    """Build a query-string fragment from an iterable of (key, value) pairs.

    Returns "" for an empty iterable. Values are assumed to already be
    strings; no URL-encoding is performed (matching the original).
    """
    # str.join replaces the original's quadratic `+=` concatenation and
    # avoids shadowing the builtin `str` as a local variable.
    return "&".join(item[0] + "=" + item[1] for item in items)

#################### Fetch and normalize data from the OPAC ####################
def huiwenData(url, headers, proxies):
    """Fetch *url* and return {"bookList": [...], "pageInfo": {...}}.

    On a network failure, or when no book entries are found in the page,
    the empty skeleton dict is returned instead of raising.
    """
    data = {
        "bookList": [],
        "pageInfo": [],
    }

    try:
        resp = requests.get(url=url, headers=headers, proxies=proxies, timeout=8)
        web_data = resp.content.decode("utf-8", "ignore")
    except requests.RequestException:
        # Narrowed from a bare `except:` — only swallow network/HTTP
        # failures, not programming errors or KeyboardInterrupt.
        return data

    soup = BeautifulSoup(web_data, "html5lib")

    # Book list entries; an empty result short-circuits before pagination.
    bookListInfo = formBookListInfo(soup.select('.book_list_info'))
    if not bookListInfo:
        return data

    # Pagination block (total count / keyword / derived page count).
    pageInfo = getPageInfo(soup.select('.search_form')[0])

    data["bookList"] = bookListInfo
    data["pageInfo"] = pageInfo

    return data


# Clean up scraped text fields in each book record.
def formFormatX(data):
    """Normalize fields of each book dict in *data*, in place.

    - bookName:   strips a leading "<number>." prefix
    - collection: keeps only the digits
    - borrow:     keeps only the digits
    - bookTime:   removes a literal "(0)" marker
    Records missing any of these keys are left partially processed,
    matching the original best-effort behavior. Returns *data*.
    """
    # Compile once, outside the loop — the original recompiled all three
    # patterns on every record.
    leading_num = re.compile(r'^([0-9]*?\.)')
    non_digits = re.compile(r'[^0-9]*')
    zero_paren = re.compile(r'\(0\)')

    for record in data:
        try:
            record["bookName"] = leading_num.sub('', record["bookName"])
            record["collection"] = non_digits.sub('', record["collection"])
            record["borrow"] = non_digits.sub('', record["borrow"])
            record["bookTime"] = zero_paren.sub('', record["bookTime"])
        except (KeyError, TypeError):
            # Narrowed from a bare `except:`; skip malformed records.
            pass

    return data

# Extract per-book fields from the list of ".book_list_info" nodes.
def formBookListInfo(dataList):
    """Return a list of dicts, one per book entry in *dataList*.

    Each dict carries: bookName, bookType, bookAuthor, bookTime, callNum,
    collection, borrow, marc_no — all strings, "" when not found.
    """
    allBookInfo = []

    for dd in dataList:
        bookListInfo = {
            "bookName": "",
            "bookType": "",
            "bookAuthor": "",
            "bookTime": "",
            "callNum": "",
            "collection": "",
            "borrow": "",
            "marc_no": "",
        }

        # Re-parse this fragment so selectors run against the entry only.
        soup = BeautifulSoup(str(dd), "html5lib")

        # Title link: its text is the book name, its href carries the marc id.
        # Hoisted — the original ran soup.select('a')[0] twice.
        anchor = soup.select('a')[0]
        bookListInfo["bookName"] = anchor.text
        bookListInfo["marc_no"] = getMarcNo(anchor["href"])

        bookListInfo["bookType"] = soup.select('h3 > span')[0].text
        # Call number is the last text chunk of the <h3>.
        bookListInfo["callNum"] = getCallNum(soup.select('h3'))

        # Collection / borrowable / author / publish time from the first <p>.
        auTiList = getAuTi(soup.select('p')[0])
        try:
            bookListInfo["collection"] = auTiList[0]
            bookListInfo["borrow"] = auTiList[1]
            bookListInfo["bookAuthor"] = auTiList[2]
            bookListInfo["bookTime"] = auTiList[3]
        except IndexError:
            # Narrowed from a bare `except:` — entries without the full
            # four fields keep whatever was assigned before the miss.
            pass

        allBookInfo.append(bookListInfo)

    return allBookInfo

# Extract the call number: the last inter-tag text chunk of the first node.
def getCallNum(tags):
    """Return the last text fragment between tags in str(tags[0]), stripped.

    The original parameter was named `list`, shadowing the builtin;
    callers pass it positionally, so the rename is safe.
    """
    fragments = re.findall(r">([\s\S]*?)<", str(tags[0]))
    return fragments[-1].strip()

# Parse pagination info (total hits, keyword, page count) from the search form.
def getPageInfo(strr, per_page=20):
    """Return {"allNum", "keyWord", "allPage"} parsed from the search-form node.

    *per_page* is the page size used to derive the page count; it defaults
    to 20, matching the hard-coded `displaypg=20` in the request URLs, so
    existing callers are unaffected.
    """
    pageInfo = {
        "allNum": "",
        "keyWord": "",
        "allPage": ""
    }
    soup = BeautifulSoup(str(strr), "html5lib")
    # Total hit count, e.g. <p><strong>123</strong> ...</p>.
    pageInfo["allNum"] = soup.select("p > strong")[0].text
    # The search keyword echoed back by the OPAC.
    pageInfo["keyWord"] = soup.select("p > font")[0].text
    # Page count derived from the page size (was the literal 20).
    pageInfo["allPage"] = math.ceil(int(pageInfo["allNum"]) / per_page)
    return pageInfo

# Extract the book-detail id: everything after the first "=" in the href.
def getMarcNo(href):
    """Return the substring after the first '=' in *href*.

    E.g. "item.php?marc_no=0001" -> "0001". Raises IndexError when no '='
    is present (unchanged from the original). The original parameter was
    named `str`, shadowing the builtin; callers pass it positionally.
    """
    return re.findall(r"=(.*)$", href)[0]


# Extract author / time / collection / borrowable text chunks from a node.
def getAuTi(strr):
    """Return the non-empty inter-tag text fragments of *strr*, in order.

    Each fragment is stripped of surrounding whitespace and has
    non-breaking spaces (``\\xa0``) removed.
    """
    raw_chunks = re.findall(r">([\s\S]*?)<", str(strr))
    cleaned = (chunk.strip().replace("\xa0", '') for chunk in raw_chunks)
    return [chunk for chunk in cleaned if chunk != '']


# Main view: proxy a title search to the OPAC and return the result as JSON.
def index(request):
    """Django view for catalog search.

    POST params: page (default 1), count (default 1), strText (default "").
    Returns an application/json HttpResponse with bookList + pageInfo.
    """
    headers, rootUrl, proxies = setting()

    # POST values arrive as strings, so the original `page == 1` check was
    # False for an explicit page "1" and wrongly used the paging URL.
    # Coerce to int, falling back to 1 on garbage input.
    try:
        page = int(request.POST.get("page", 1))
    except (TypeError, ValueError):
        page = 1
    count = request.POST.get("count", 1)
    strText = request.POST.get("strText", '')

    searchUrl = "strSearchType=title&match_flag=any&historyCount=1&strText={}&doctype=ALL&with_ebook=on&displaypg=20&showmode=list&sort=CATA_DATE&orderby=desc&location=ALL"
    pageUrl = "strSearchType=title&match_flag=any&historyCount=1&doctype=ALL&with_ebook=on&displaypg=20&showmode=list&sort=CATA_DATE&orderby=desc&location=ALL&strText={}&count={}&page={}"

    if page == 1:
        c = searchUrl.format(strText)
    else:
        c = pageUrl.format(strText, count, page)

    url = rootUrl + c

    data = huiwenData(url, headers, proxies)

    return HttpResponse(json.dumps(data), content_type="application/json")





