# -*- coding: utf-8 -*-
"http://temp.163.com/special/00804KVA/cm_guoji.js?callback=data_callback"
"http://temp.163.com/special/00804KVA/cm_guonei.js?callback=data_callback"
"http://temp.163.com/special/00804KVA/cm_shehui.js?callback=data_callback"


"http://comment.news.163.com/api/v1/products/a2869674571f77b5a0867c3d71db5856/threads/DGNAUKHG0001875O/comments/newList?offset=30&limit=30&showLevelThreshold=72&headLimit=1&tailLimit=2&callback=getData"

"a2869674571f77b5a0867c3d71db5856"
"a2869674571f77b5a0867c3d71db5856"
import requests
from pyquery import PyQuery as jq
import json,re
import logging
import pandas as pd

# Root logger configuration: INFO level, timestamped "name - level - message" lines.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Maps positional row indices to Chinese Excel column headers when the scraped
# rows are exported via pandas: indices 0-4 are article metadata, 5-10 are
# per-comment fields (user, IP, time, upvotes, content, concatenated replies).
datacol = {0:'新闻标签',1:'新闻来源',2:'新闻发布时间',3:'评论数量',4:'新闻标题',5:'用户名',6:'用户IP',7:'发布时间',8:'顶(数量)',9:'评论内容',10:'回复'}
# Category labels. NOTE(review): appears unused — getList hard-codes the
# three categories itself; kept for backward compatibility.
typeList = ['国内','社会','国际']
class news():
	"""Scraper for NetEase (163.com) news listings and their comments.

	``getList`` walks three hard-coded categories (international, domestic,
	society), collects one row per article plus one row per top-level
	comment into ``self.allList``, and persists each category as both a
	JSON file and an Excel sheet (column names from module-level
	``datacol``).
	"""

	def __init__(self):
		# Accumulated rows (article rows + comment rows) for the category
		# currently being scraped; reset between categories.
		self.allList = []
		# Label placed in column 0 of article rows ("news tag").
		self.first = '国内'
		self.session = requests.Session()
		self.logger = logging.getLogger("__main__")

	def resp_check(self, resp, login=False):
		"""Return True when the HTTP response succeeded (status 200).

		``login`` is accepted for backward compatibility and ignored.
		"""
		return resp.status_code == 200

	# Unwrap a JSONP payload into parsed JSON.
	def loads_jsonp(self, _jsonp):
		"""Strip the JSONP callback wrapper and parse the enclosed JSON.

		Takes everything between the first '(' and the last ')' so that
		parentheses *inside* the JSON payload (e.g. in comment text) are
		preserved.  (The previous split/join approach silently deleted
		them, corrupting the data.)

		Raises ValueError on input that is not valid JSONP.
		"""
		start = _jsonp.find("(")
		end = _jsonp.rfind(")")
		if start == -1 or end <= start:
			raise ValueError('Invalid Input')
		try:
			return json.loads(_jsonp[start + 1:end])
		except ValueError as err:
			raise ValueError('Invalid Input') from err

	def _scrape_category(self, label, slug, json_name, excel_name):
		"""Scrape one category (2 listing pages) and persist the results.

		label:      Chinese category label written into article rows.
		slug:       URL slug of the listing feed (e.g. 'guoji').
		json_name:  output JSON filename.
		excel_name: output Excel filename.
		"""
		self.allList = []
		self.logger.info(f"{label}数据")
		self.first = label
		for index in range(1, 3):
			self.logger.info(f"正在爬取第{index}页新闻数据")
			# Page 1 has no suffix; later pages are "_02", "_03", ...
			suffix = "" if index == 1 else "_0" + str(index)
			resp = self.session.get(
				f"http://temp.163.com/special/00804KVA/cm_{slug}{suffix}.js?callback=data_callback")
			if self.resp_check(resp):
				self.papresJson(self.loads_jsonp(resp.text))
		# ensure_ascii=False keeps Chinese text human-readable in the dump.
		with open(json_name, 'w', encoding='utf-8') as f:
			json.dump(self.allList, f, ensure_ascii=False)
		df = pd.DataFrame(self.allList)
		df.rename(columns=datacol, inplace=True)
		df.to_excel(excel_name)

	def getList(self):
		"""Scrape all three categories, one output file pair per category."""
		self._scrape_category('国际', 'guoji', 'guoji.json', '国际.xlsx')
		self._scrape_category('国内', 'guonei', 'guonei.json', '国内.xlsx')
		self._scrape_category('社会', 'shehui', 'shehui.json', '社会.xlsx')

	def parseJson(self, jsons):
		"""Flatten one page of comment data into rows on ``self.allList``.

		Each entry of ``jsons['commentIds']`` is a comma-separated id
		chain: the first id is the top-level comment, the remaining ids
		are replies concatenated (slash-prefixed) into one "reply" cell.
		"""
		for ids in jsons['commentIds']:
			allcomment = {}
			strs = ""
			for index, cid in enumerate(ids.split(',')):
				comments = jsons['comments'][cid]
				if index == 0:
					user = comments['user']
					# Anonymous commenters carry no 'nickname' key.
					allcomment['name'] = user.get('nickname', "")
					allcomment['ip'] = user['location']
					allcomment['times'] = comments['createTime']
					allcomment['vote'] = comments['vote']
					allcomment['content'] = comments['content']
				else:
					strs = strs + "/" + comments['content']
			# First five cells (article metadata) stay blank: the matching
			# article row was already appended by papresJson.
			self.allList.append(['', '', '', '', '',
								allcomment['name'], allcomment['ip'],
								allcomment['times'], allcomment['vote'],
								allcomment['content'], strs])

	def getCommentList(self, types, num):
		"""Fetch every page of comments for one article thread.

		types: one-element list holding the article's doc id.
		num:   listed comment count; accepted for compatibility but the
		       actual page count comes from the API's 'newListSize'.
		"""
		base = ("http://comment.news.163.com/api/v1/products/"
				"a2869674571f77b5a0867c3d71db5856/threads/"
				f"{types[0]}/comments/newList")
		tail = "&limit=30&showLevelThreshold=72&headLimit=1&tailLimit=2&callback=getData"
		resp = self.session.get(f"{base}?offset=0{tail}")
		if not self.resp_check(resp):
			return
		myjson = self.loads_jsonp(resp.text)
		self.parseJson(myjson)
		max_num = myjson['newListSize']
		self.logger.info(f"正在爬取第0页评论数据{max_num}")
		# Remaining pages, 30 comments per page.
		for index in range(1, max_num // 30 + 1):
			size = index * 30
			self.logger.info(f"正在爬取第{index}页评论数据")
			resp = self.session.get(f"{base}?offset={size}{tail}")
			if self.resp_check(resp):
				self.parseJson(self.loads_jsonp(resp.text))

	def papresJson(self, jsons):
		"""Parse one listing page: append an article row per item, then
		scrape that article's comments.

		(Method name keeps the original 'papres' typo — presumably meant
		'parse' — so any external callers remain unaffected.)
		"""
		for item in jsons:
			title = item['title']
			urls = item['docurl']
			time = item['time']
			comment_num = item['tienum']
			# Fetch the article page to extract its source/publisher link text.
			source = jq(self.session.get(urls).text)('.post_time_source > a').text()
			# Thread doc id is the extensionless filename part of the URL.
			url_type = urls.split("/")[-1].split(".")[:1]
			self.allList.append([self.first, source, time, comment_num, title])
			self.getCommentList(url_type, comment_num)




if __name__ == '__main__':
	# Entry point: scrape all three NetEase news categories (and each
	# article's comments), writing one JSON + Excel file per category.
	news().getList()


