# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'Yang'
__mtime__ = '2018/12/11 20:55'
# code is far away from bugs with the god animal protecting
    I love animals. They taste delicious.
              ┏┓      ┏┓
            ┏┛┻━━━┛┻┓
            ┃      ☃      ┃
            ┃  ┳┛  ┗┳  ┃
            ┃      ┻      ┃
            ┗━┓      ┏━┛
                ┃      ┗━━━┓
                ┃  神兽保佑    ┣┓
                ┃　永无BUG！   ┏┛
                ┗┓┓┏━┳┓┏┛
                  ┃┫┫  ┃┫┫
                  ┗┻┛  ┗┻┛
"""

import re
import time
import pymysql
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from sqlalchemy import create_engine

# conn = create_engine('mysql+pymysql://root:123@localhost:3306/student?charset=utf8')
# pd.set_option('display.max_rows', 5000)
# pd.set_option('display.max_columns', 5000)
# db = pymysql.connect(host='localhost', port=3306, user='root', passwd='123', db='pc')
# cursor = db.cursor()
# cursor.execute('select * from douban_book_info')
# data = cursor.fetchall()
#
# df = pd.DataFrame(list(data),
#                   columns='id,url,book_name,book_lable,book_author,book_publish,book_price,book_img,book_intro'.split(
#                       ','))
#
# print(df['url'].to_csv('url.csv', index=False))

# Shared HTTP request headers: impersonate a desktop Chrome browser
# (User-Agent + Referer) so the target site is less likely to reject
# or throttle the scraper's requests.
headers = {'Accept': '*/*',
           'Accept-Language': 'en-US,en;q=0.8',
           'Cache-Control': 'max-age=0',
           'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
           'Connection': 'keep-alive',
           'Referer': 'http://www.baidu.com/'
           }


def get_soup(url):
    """Fetch *url* and return its HTML parsed into a BeautifulSoup tree.

    The request is sent with the module-level browser-like ``headers``.
    The response encoding is corrected from ``apparent_encoding`` before
    parsing (avoids mojibake on pages with a wrong/missing charset header),
    and newlines are stripped so extracted text comes out as single lines.

    :param url: page URL; may contain a ``{}`` placeholder, which is filled
        with ``0`` (leftover pagination hook, kept for compatibility with
        existing callers).
    :return: ``BeautifulSoup`` document parsed with the ``lxml`` parser.
    :raises requests.RequestException: on network failure, timeout, or an
        HTTP error status.
    """
    # timeout prevents the scraper from hanging forever on a dead host
    resp = requests.get(url.format(0), headers=headers, timeout=10)
    # fail fast on 4xx/5xx instead of silently parsing an error page
    resp.raise_for_status()
    resp.encoding = resp.apparent_encoding
    return BeautifulSoup(resp.text.replace('\n', ''), 'lxml')


url = 'https://book.douban.com/subject/1856494/'

# intro usually has 3 entries; if so, the first two are the book synopsis and the third is the author bio
# outline usually has 1 entry: the book's table-of-contents information
# outline = soup.select('#dir_' + isbn + '_full')
# img_url = soup.select('a.nbg')[0].get('href')
# isbn = '1856494'
# intro = soup.select('div.intro')
# # intro 一般是3条，如果是3条，第一二条都是书籍简介，第三条是作者简介
# # outline一般是1条，书籍的目录信息 E
# outline = soup.select('#dir_' + isbn + '_full')
# img_url = soup.select('a.nbg')[0].get('href')
# s = str(url) + '\n' + str(img_url) + '\n'
# print(s)
# # 1 是内容简介，2是内容简介加作者简介，3可能是作者或书籍其中之一更多选项，4是两个都有一个更多选项
# # 现将3单独用函数判断
# intro = [str(i).replace('\n', '') for i in intro]
# print(len(intro))
# if len(intro) == 2:
#     s = s + str(intro[0]) + '\n' + str(intro[1]) + '\n'
# elif len(intro) == 1:
#     s = s + str(intro[0]) + '\n' + '\n'
# elif len(intro) == 3:
#     # s = s + str(intro[1]) + ',' + str(intro[2]) + ','
#     l = [i for i in intro if str(i).find('a_show_full') == -1]
#     print(len(l))
#     s = s + str(l[0]) + '\n' + str(intro[1]) + '\n'
# elif len(intro) == 4:
#     s = s + str(intro[1]) + '\n' + str(intro[3]) + '\n'
# outline = [str(i).replace('\n', '') for i in outline]
# if len(outline) == 1:
#     s = s + str(intro[0])
# elif len(outline) == 2:
#     s = s + str(intro[1])
# elif len(outline) == 0:
#     s = ''
#
# with open('csv/' + str(isbn) + '.txt', 'w', encoding='utf-8') as book_infos_f:
#     book_infos_f.writelines(s)
# time.sleep(1)


s = 'abcde'
# print(s[::-1])
# print(s[::1])
# print(s)
# print(list(reversed(s)))
# count = ''
# for i in reversed(s):
#     count = i + count
#
# print(count)
#
# import os
#
# print(os.listdir('/'))
# print(os.getcwd())
# Quick connectivity check: fetch a test JSON payload and dump it to stdout.
# NOTE(review): hard-coded IP looks like a temporary dev/test endpoint — confirm.
# A timeout keeps the script from hanging indefinitely if the host is down.
s = requests.get('http://132.232.137.13:8080/test.json', timeout=10)
print(s.text)
