import requests,os
from bs4 import BeautifulSoup as bs

def getpage(timeout=10):
	'''
	Fetch the JD.com shopping-cart page using a pre-captured login cookie.

	The cart is expected to already contain items (originally two:
	a women's Omega watch and a Lenovo gaming laptop).

	:param timeout: seconds to wait for the HTTP response; without it
		requests.get can block indefinitely on a dead connection.
	:return: the page HTML as a string, or None on non-200 status / error.
	'''
	url='https://cart.jd.com/cart.action'

	# Headers (incl. session cookie) captured from a logged-in browser
	# session; the Cookie value is account-specific and will expire.
	headers={
		'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
		'Accept-Encoding':'gzip, deflate, br',
		'Accept-Language':'zh-CN,zh;q=0.9',
		'Cache-Control':'max-age=0',
		'Connection':'keep-alive',
		'Cookie':'shshshfpa=31fa953e-b3ae-963c-98a2-29b0685c8e0f-1527597322; areaId=1; __jdv=122270672|direct|-|none|-|1536841193966; user-key=5e4f5abe-8e25-43af-b8e0-4b4bdc1f1733; TrackID=18ykcklc6xVBqplROIivpXrAn6Jg0NPWkG--D13lMCYhZvx2iJFCp_qf_V84wbbX8KMKKM3RA8A-3uK_vSbhbci9vGMxKKmfU3b8O0jgcEfo; thor=2769923E385856B1D15FF255B9EFB92A8443B867E0F3D295599547BC18BC1B397203F56DE99A121F3FA485CAEEB72ED2EB59615E4F7DA7B1C070DE8738DBADF82FC9B8F9451538F4191C1022D60E4BFA187C5BC3F3CB93B0568190F791E6CE3AD783698A36582BF69EC724EC2CEDED6E99E5077EC3A61810ADA7061E9650CD3F5B7E83F1C8614E1090EF609DB5F1CDFE; pinId=I5FqnWH2iZzvH6PAy6UGog; pin=ffllyy2014; unick=ffllyy2014; ceshi3.com=103; _tp=yx5ZMtJJK0cs4n%2BApzCETw%3D%3D; _pst=ffllyy2014; PCSYCityID=1; cart-main=xx; cd=0; cn=2; shshshfp=76c81e9b96f1861c060faf9927cf3aa4; shshshsID=7e81dacdfb70ab844365edcd4830535a_6_1537001069253; shshshfpb=142a4e7c91cd94fe08b49799370d71c7869b377ce2c076dad5b0d490cb; __jda=122270672.1913593827.1527597317.1536841194.1537000762.5; __jdb=122270672.13.1913593827|5.1537000762; __jdc=122270672; 3AB9D23F7A4B3C9B=2RLFGH2LTIAWMIUSWXHXR2JP74EBOSQCXMFOV6AK6VSGUUQDU735ASERPOZ27TUDLCACECV562NS6BWUUUIYFOVLWU; __jdu=1913593827; ipLoc-djd=1-72-4137-0',
		'Host':'cart.jd.com',
		'Upgrade-Insecure-Requests':'1',
		'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.5478.400 QQBrowser/10.1.1550.400',
	}

	try:
		# timeout added so a stalled connection fails instead of hanging.
		res = requests.get(url, headers=headers, timeout=timeout)
		if res.status_code == 200:
			return res.text
		return None
	except requests.RequestException as err:
		# Narrowed from bare Exception: only network/HTTP errors are
		# expected here; programming errors should surface normally.
		print("爬取失败，原因是："+str(err))
		return None

def parsepage(content):
	'''
	Parse the cart-page HTML and yield one dict per product.

	Each yielded dict has keys: 'title' (product name), 'color' (props
	text), 'price' (unit price), 'quantity', 'total' (line subtotal),
	and 'all' (cart-wide grand total, identical across items).

	:param content: cart page HTML, or None/'' — then nothing is yielded.
	'''
	if not content:
		return
	soup = bs(content, 'lxml')
	items = soup.find_all(name="div", attrs={"product": "1"})
	if not items:
		return
	# Grand total is a cart-level value, not per-item: look it up once
	# instead of re-querying the soup on every iteration.
	grand_total = soup.find(name="span", attrs={"class": "price sumPrice"}).em.get_text()
	for item in items:
		yield {
			'title': item.find(name="div", attrs={'class': 'p-name'}).a.get_text().strip(),
			'color': item.find(name="div", attrs={'class': 'props-txt'}).string.strip(),
			# NOTE: the trailing space in the class string matches JD's markup.
			'price': item.find(name="div", attrs={'class': 'cell p-price p-price-new '}).strong.string,
			'quantity': item.find(name="input", attrs={'autocomplete': 'off'}).attrs['value'],
			'total': item.find(name="div", attrs={'class': 'cell p-sum'}).strong.string,
			'all': grand_total,
		}

def showpage(content, no):
	'''
	Print one product's cart details to stdout.

	:param content: item dict produced by parsepage(); falsy values
		(None, {}) are silently skipped so a failed scrape prints nothing.
	:param no: 1-based position of the item, used in the heading.
	:return: None.
	'''
	if content:
		# f-strings replace the original str()+concatenation; output is
		# byte-identical for the string values parsepage() produces.
		print(f"第{no}件商品信息")
		print(f"名称：{content['title']}")
		print(content['color'])
		print(f"单价：{content['price']}")
		print(f"数量：{content['quantity']}")
		print(f"总价：{content['total']}")


def main():
	'''Entry point: fetch the cart page, parse it, and print every item
	plus the grand total.'''
	html = getpage()
	items = list(parsepage(html))
	print("当前京东购物车有"+str(len(items))+"件商品")
	for no, item in enumerate(items, start=1):
		showpage(item, no)
	# Guard: when getpage() fails (returns None) or the cart is empty,
	# items is [] and the original items[0] raised IndexError here.
	if items:
		print("当前购物车商品总价为："+items[0]['all']+"元")

if __name__ == '__main__':
	main()