from random import random

import radar as radar
import requests
import re  # 正则

from bs4 import BeautifulSoup
import os
import random
import requests
import radar
import json

all_img_urls = []  # collected image links (currently unused by the main loop)

all_urls = [1, 2, 3, 4, 5]  # 5 result pages in total at present

# Request headers: JSON content type plus a desktop-browser User-Agent
# so the scrape is not rejected as a bot.
headers = {"Content-type": "application/json;charset=utf-8",
           'User-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW 64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/55.0.2883.87 Safari/537.36 QIHU 360SE'}
jar = requests.cookies.RequestsCookieJar()
# Session cookie sent with the POSTs to the backend below.
cookies1 = {"JSESSIONID": "E1F87DB8241A4BABED876D3CAE9A4BAF"}

# Alternative cookie set (not referenced by the main loop).
cookies = {"JSESSIONID": "3f6122f5-acfd-45b2-8f84-91d6c3cfde80",
           "Hm_lpvt_2cb70313e397e478740d394884fb0b8a": "1664023537",
           "Hm_lvt_2cb70313e397e478740d394884fb0b8a": "1664023537"}

# Classification letters accepted by the search endpoint's
# `classification` query parameter.
all_type = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
# all_type = ["A"]

# Backend endpoint that receives one JSON record per scraped book.
url = "http://120.55.37.58:8910/book"


def getHtml(page: int = 1, type_a: str = "A") -> str:
    """Build the NLC search-list URL for one page of one classification.

    Args:
        page: 1-based page number of the search results.
        type_a: classification letter ("A" .. "J").

    Returns:
        The fully formatted search URL.
    """
    # The original signature used literal values as *annotations*
    # (``page: 1``) rather than defaults, so calling without arguments
    # failed. Both call sites pass keywords, so real defaults are
    # backward-compatible.
    return ("http://read.nlc.cn/allSearch/searchList?searchType=1&showType=1"
            "&pageNo=%s&searchWord=&classification=%s" % (page, type_a))


def create_dir_not_exist(path):
    """Create directory *path* (including any missing parents) if absent.

    Args:
        path: filesystem path of the directory to ensure.
    """
    # makedirs with exist_ok avoids both the race between the old
    # exists()/mkdir() pair and the FileNotFoundError os.mkdir raises
    # when an intermediate directory is missing.
    os.makedirs(path, exist_ok=True)


if __name__ == "__main__":

    # Pages 1..2 of every classification letter.
    pages = list(range(1, 3))

    pageUrl = []
    for i in pages:
        for t in all_type:
            pageUrl.append(getHtml(page=i, type_a=t))

    print("xxxxxxxx")
    print(pageUrl)

    # create_dir_not_exist("./images")
    for i in pageUrl:
        res = requests.get(i, headers=headers)
        html = BeautifulSoup(res.text, 'html.parser')

        # One <span class="tt"> per book title; images/authors/presses are
        # assumed to be parallel lists of the same length — TODO confirm
        # against the live page markup.
        srcs = html.find_all('span', class_='tt')
        image = html.find_all("img")
        type_ = html.find('span', class_='YX').text
        # NOTE(review): `text=` is the legacy bs4 keyword (renamed `string=`
        # in 4.4+); kept as-is since the installed version is unknown.
        authorArr = html.find_all("p", text=lambda x: x and x.startswith('作者'))
        pressArr = html.find_all("p", text=lambda x: x and x.startswith('出版社'))
        for index in range(len(srcs)):
            title = srcs[index].text
            author = authorArr[index].text[4:]   # strip the '作者' label prefix
            price = random.randint(10, 300)      # no price on the page; randomize
            pressTime = radar.random_date("2000-01-01", "2020-01-01")
            press = pressArr[index].text[5:]     # strip the '出版社' label prefix
            imageUrl = image[index].get('src')
            # json.dumps escapes quotes/backslashes inside scraped text; the
            # previous '"%s"' template emitted invalid JSON whenever a field
            # contained a double quote.
            data = json.dumps({
                "title": title,
                "press": press,
                "pressTime": str(pressTime),
                "author": author,
                "price": str(price),
                "type": type_,
                "file": "",
                "keyword": "",
                "result": "",
                "image": imageUrl,
            }, ensure_ascii=False)
            print(data)
            rest = requests.post(url=url, headers=headers, cookies=cookies1, data=data.encode())
            print(rest.text)
