import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
from fake_useragent import UserAgent
import chardet
import urllib
import pickle
from PIL import Image
import pytesseract
from selenium import webdriver


# Obey robots.txt
def robot_check(robottxt_url, headers_value, url):
    """Return True if the site's robots.txt allows `headers_value` to fetch `url`.

    :param robottxt_url: absolute URL of the robots.txt file
    :param headers_value: user-agent identifier passed to can_fetch
                          (NOTE(review): in __main__ this name holds a headers
                          *dict*; can_fetch expects a UA string — confirm caller)
    :param url: the URL whose fetch permission is being checked
    :return: bool, True when fetching is permitted
    """
    # `import urllib` alone does not make the robotparser submodule available
    import urllib.robotparser
    rp = urllib.robotparser.RobotFileParser()  # fixed typo: RobotFileparser
    rp.set_url(robottxt_url)                   # fixed typo: ser_url
    rp.read()                                  # downloads and parses robots.txt
    return rp.can_fetch(headers_value, url)

#识别验证码
#识别验证码
def Reconition_code():
    """OCR the captcha image saved as 'captcha.jpg' and print the result.

    :return: the recognized text (previously returned None; callers that
             ignore the return value are unaffected)
    """
    image = Image.open("captcha.jpg")
    vcode = pytesseract.image_to_string(image)
    # fixed: Image._show is a private PIL helper; image.show() is the public API
    image.show()
    print(vcode)
    return vcode

#动态内容添加
#动态内容添加
def Attain_Dynamic_Conrent():
    """Render a JS-heavy page in Chrome and print the resulting HTML source.

    Reads the module-level name `urls` for the target address.
    NOTE(review): in __main__ `urls` is a *list*, which driver.get cannot
    load — confirm the intended value (the commented-out single-URL string
    at the call site suggests a plain string was meant).
    """
    try:
        driver = webdriver.Chrome(executable_path="chromedriver.exe")
    except Exception as e:
        # fixed: the original bare except printed a message but then fell
        # through and used the unbound `driver`, raising NameError
        print("加载错误", e)
        return

    try:
        driver.get(urls)
        print(driver.page_source)
        time.sleep(50)  # leave the browser open long enough to inspect the page
    finally:
        driver.quit()  # fixed: ensure the browser process is closed even on error
#模拟登录
#模拟登录
def get_cookie_from_net():
    """Log in to Douban via the mobile login endpoint and cache the cookies.

    Uses the module-level session `s` and `headers_values`. On success the
    cookie jar is pickled (as a dict) to 'cookies.douban'.

    :return: the session's cookie jar
    """
    url = 'https://accounts.douban.com/j/mobile/login/basic'
    # SECURITY: credentials are hard-coded in source — move them to a config
    # file or environment variables.
    payload = {
        'ck': "",
        'name': '215654412',
        'password': '19990227lsq',
        # NOTE(review): 'remeber' looks like a typo for 'remember' — confirm
        # the field name the endpoint actually expects before changing it.
        'remeber': 'false',
        'ticket': '',
    }
    # fixed: the login request was previously issued twice back-to-back;
    # a single POST is sufficient (and avoids double-submitting credentials)
    data = s.post(url, headers=headers_values, data=payload).json()

    print(data['status'])
    if data['status'] == 'success':
        print('登录成功')
        with open('cookies.douban', 'wb') as f:
            cookiedict = requests.utils.dict_from_cookiejar(s.cookies)
            pickle.dump(cookiedict, f)
    return s.cookies

def get_cookie_from_file():
    """Load the pickled cookie dict from disk and rebuild a CookieJar.

    :return: a requests CookieJar restored from the cache file
    :raises OSError: when the cache file does not exist / cannot be read
    """
    # fixed: read the same file that get_cookie_from_net writes
    # ('cookies.douban', not 'cookie.douban') — the mismatch meant the
    # cached cookies could never be found
    with open('cookies.douban', 'rb') as f:
        cookiedict = pickle.load(f)
        cookies = requests.utils.cookiejar_from_dict(cookiedict)
        print('解析文件，成功提取cookies')
    return cookies

def getdata(html):
    """Return the text content of the '#display' element in a response page.

    :param html: a requests Response whose .text holds the HTML
    :return: the element's text
    """
    page = BeautifulSoup(html.text, 'lxml')
    display_node = page.select('#display')[0]
    return display_node.get_text()

def login_and_getdata():
    """Obtain session cookies (file cache first, live login as fallback)
    and print the '#display' data from the user's profile page.

    Uses the module-level session `s` and `headers_values`.
    """
    print('获取cookies')

    try:
        s.cookies = get_cookie_from_file()
    except (OSError, pickle.UnpicklingError, EOFError):
        print('从文件获取cookie失败\n正在尝试提交表单')
        # fixed: the network login previously ran *unconditionally* after the
        # try/except, so the cookie cache was never actually used — it now
        # runs only when loading from file fails
        s.cookies = get_cookie_from_net()

    html = s.get('https://www.douban.com/people/215654412/', headers=headers_values)
    data = getdata(html)
    print(data)



#请求数据
#请求数据
def get_data(url, headers_value, num_tries):
    """GET `url`, retrying up to `num_tries` times on 5xx server errors.

    :param url: target URL
    :param headers_value: headers dict for the request
    :param num_tries: remaining retry budget for 5xx responses
    :return: the Response object, or None when the request failed
    """
    data = None  # fixed: `data` was unbound if requests.get raised before assignment
    try:
        data = requests.get(url, headers=headers_value)
        charset = chardet.detect(data.content)
        print(charset)
        data.encoding = 'utf-8'  # fixed typo: 'enconding' silently did nothing
        print(data.status_code)
    except requests.exceptions.RequestException as e:
        # widened from ConnectionError: timeouts, invalid URLs etc. also
        # mean "no usable response"
        print("请求错误,url=", url)
        print("请求错误：", e)
        data = None
    if data is not None and 499 < data.status_code < 600:
        if num_tries > 0:
            print("服务器错误，正在重拾。。。")
            data = get_data(url, headers_value, num_tries - 1)
        else:
            print("服务器无法访问")
            # fixed: removed exit(1) — a helper must not kill the whole
            # process; returning None lets the caller decide
            data = None
    return data
#print(data.text)
#请求数据

#提取数据
#提取数据
def parse_data(data):
    """Extract book records from the Douban 'latest books' page.

    :param data: a requests Response for the listing page
    :return: five parallel lists — (img_urls, titles, details, authors, ratings)
    """
    soup = BeautifulSoup(data.text, 'lxml')

    # the page lays books out in two <ul> columns; collect both
    left_col = soup.find('ul', {'class': "cover-col-4 clearfix"}).find_all("li")
    right_col = soup.find('ul', {'class': "cover-col-4 pl20 clearfix"}).find_all("li")

    img_urls, titles, ratings, authors, details = [], [], [], [], []

    for item in list(left_col) + list(right_col):
        anchors = item.find_all('a')
        # first anchor wraps the cover image, second holds the title text
        img_urls.append(anchors[0].find('img').get('src'))
        titles.append(anchors[1].get_text())

        raw_rating = item.find('p', {'class': 'rating'}).get_text()
        ratings.append(raw_rating.replace('\n', '').replace(' ', ''))

        raw_detail = item.find_all('p')[2].get_text()
        details.append(raw_detail.replace('\n', '').replace(' ', ''))

        authors.append(item.find('p', {'class': 'color-gray'}).get_text(strip=True))

    print("img_urls:", img_urls)
    print("titles:", titles)
    print("details:", details)
    print("authors:", authors)
    print("ratings:", ratings)
    return img_urls, titles, details, authors, ratings
#提取数据

#存储数据
#存储数据
def save_data(img_urls, titles, details, authors, ratings):
    """Write the five parallel book-attribute lists to 'result.csv'.

    Uses utf_8_sig (UTF-8 with BOM) so Excel opens the CJK text correctly.
    """
    table = pd.DataFrame({
        'img_urls': img_urls,
        'titles': titles,
        'details': details,
        'authors': authors,
        'ratings': ratings,
    })
    table.to_csv('result.csv', encoding="utf_8_sig", index=None)
#存储数据


#网络爬虫
#网络爬虫
if __name__ == '__main__':
    urls = ['https://book.douban.com/latest']

    # Accumulators over every successfully scraped page.
    img_urls_squence = []
    titles_squence = []
    details_squence = []
    authors_squence = []
    ratings_squence = []

    s = requests.session()
    ua = UserAgent()
    # fixed: the dict literal previously listed 'User-Agent' twice; the first
    # entry (ua.ie) was silently discarded, so only the explicit Chrome UA
    # string ever took effect — keep that one.
    headers_values = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
    }
    login_and_getdata()
    for url in urls:
        data = get_data(url, headers_values, 3)
        if data is not None:
            img_urls, titles, details, authors, ratings = parse_data(data)
            # fixed: the appends previously ran even when parse_data was
            # skipped (NameError on a failed first request); extend keeps
            # the accumulators flat, one entry per book.
            img_urls_squence.extend(img_urls)
            titles_squence.extend(titles)
            details_squence.extend(details)
            authors_squence.extend(authors)
            ratings_squence.extend(ratings)
    # fixed: previously saved only the *last* page's lists; the accumulated
    # sequences were built but never used
    save_data(img_urls_squence, titles_squence, details_squence,
              authors_squence, ratings_squence)
# web crawler