# -*-coding: utf-8 -*-
'''
Created on 2018-7-10

@author: xubaifu

网络爬虫
'''
from urllib import request
from bs4 import BeautifulSoup            #Beautiful Soup是一个可以从HTML或XML文件中提取结构化数据的Python库
from sqlalchemy.sql.expression import except_

import requests
import re
import random
import time
import unicodedata
import emoji
import sys


s = requests.session()

#获取评论内容和下一页链接
#获取评论内容和下一页链接
def get_data(html):
    """Parse one Douban comment page.

    Args:
        html: raw HTML (bytes or str) of a comment-list page.

    Returns:
        A tuple ``(comment_list, next_page)`` where ``comment_list`` is the
        list of ``<p>`` tags under elements with class ``comment``, and
        ``next_page`` is the ``href`` of the "next page" link, or ``None``
        when the page has no next link (i.e. the last page).
    """
    soup = BeautifulSoup(html, "lxml")
    comment_list = soup.select('.comment > p')
    # BUGFIX: the original did soup.select('.next')[0] unconditionally,
    # which raises IndexError on the last page. Callers (see the
    # commented-out loop below) expect None to signal "no more pages".
    next_links = soup.select('.next')
    next_page = next_links[0].get('href') if next_links else None
    return comment_list, next_page
 
if __name__ == "__main__":
    # Base comment URL kept from the original Douban scraper; note the
    # loop below actually fetches 36kr article pages by id.
    absolute = 'https://movie.douban.com/subject/27605698/comments'

    comment_list = []
    num = 0  # pages fetched so far (used by the commented-out IP rotation below)

    # txt/movie.txt holds one article id per line.
    with open('txt/movie.txt', 'r', encoding='UTF-8') as fileId:
        for movie_id in fileId:
            movie_id = movie_id.strip()
            # Skip blank lines so we don't request the bare
            # 'https://36kr.com/p/' URL.
            if not movie_id:
                continue

            request_url = 'https://36kr.com/p/%s' % movie_id
            print(request_url)
            html = s.get(request_url).content
            soup = BeautifulSoup(html, "lxml")
            # Article body paragraphs live under the .common-width container.
            comment_list = soup.select('.common-width > p')
            print(comment_list)
            # Random short delay (0.1–1.0 s) to avoid hammering the server.
            time.sleep(float(random.randint(1, 10)) / 10)
            num = num + 1
            # 每20次更新一次ip (every 20 requests, rotate the proxy IP —
            # see the commented-out get_random_ip logic below)
    # No explicit fileId.close(): the `with` statement closes the file.
    # (Removed the dead temp_list/next_page/current_page variables — the
    # original concatenated an always-empty temp_list into comment_list.)
#     while(1):
#         html = s.get(current_page, cookies=cookies, headers=headers, proxies=proxies).content
#         temp_list,next_page = get_data(html)
#         if next_page is None:
#             break
#         current_page = absolute + next_page
#         comment_list = comment_list + temp_list
#         #time.sleep(1 + float(random.randint(1, 100)) / 20)
#         num = num + 1
#         #每20次更新一次ip
#         if num % 20 == 0:
#             proxies = get_random_ip(ip_list)
#         print( current_page)
# #     print(comment_list)
#     #将爬取的评论写入txt文件中
#     with open("C:\\Users\\xubaifu\\Desktop\\articles.txt", 'a')as f:
#         for node in comment_list:
#             comment = node.get_text().strip().replace("\n", "")
# #             中文标点转英文标点
# #             comment = unicodedata.normalize('NFKC', comment)
# #             去除emoji表情
#             emoji.demojize(comment)
#             try:
#                 f.write(comment + "\n")
#             except:
#                 print(111)
#     f.close()



# with open(r"C:\Users\xubaifu\Desktop\articles.txt","w") as file: 
#     file.write("")
#     file.close()
# L = [20 * (x - 1) for x in range(1,1000)]
# for start in L:
#     
#     #构造头文件，模拟浏览器访问
#     url="https://movie.douban.com/subject/27605698/comments?start=%d&limit=20&sort=new_score&status=P" %start
#     print(url)
#     headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
#     page = request.Request(url,headers=headers)
#     page_info = request.urlopen(page).read().decode('utf-8')#打开Url,获取HttpResponse返回对象并读取其ResposneBody
#       
#     # 将获取到的内容转换成BeautifulSoup格式，并将html.parser作为解析器
#     soup = BeautifulSoup(page_info, 'html.parser')
# #     print(soup)
#     # 以格式化的形式打印html
#     #print(soup.prettify())
#       
#     titles = soup.find_all('span', 'short')# 查找所有a标签中class='title'的语句
#     print(len(titles))
#     '''
#     # 打印查找到的每一个a标签的string和文章链接
#         for title in titles:
#             print(title.string)
#             print("http://www.jianshu.com" + title.get('href'))   
#     '''
# #     open()是读写文件的函数,with语句会自动close()已打开文件
#     with open(r"C:\Users\xubaifu\Desktop\articles.txt","a") as file:       #在磁盘以只写的方式打开/创建一个名为 articles 的txt文件
#         for title in titles:
#             try:
#                 file.write(title.string+'\n')
#             except:
#                 print(111)
# #             file.write("http://www.jianshu.com" + title.get('href')+'\n\n')
#     file.close()   


