#coding:utf-8
from bs4 import BeautifulSoup
import requests

# Convert a PC (desktop) Tmall item URL into its mobile equivalent.
'''
For example, convert:
https://detail.tmall.com/item.htm?spm=a230r.1.14.30.112c68bc6tlxaU&id=560090110694&ns=1&abbucket=8&sku_properties=5919063:6536025
into:
https://detail.m.tmall.com/item.htm?id=560090110694&sku_properties=1627207:28338
'''
class TBSpider(object):
    """Scrape the sales count ("sellCount") of a Tmall item.

    Given a PC (desktop) item URL, extracts the numeric item id and builds
    the equivalent mobile URL, whose page embeds the sales count in a
    <script> block as JSON (`"sellCount":<n>,`).
    """

    def __init__(self, pc_url):
        # Take the text after "id=" up to the next "&" (query-parameter
        # separator).  Splitting on "&" — not on a space — is required,
        # otherwise a URL with trailing parameters (…&ns=1&abbucket=8…)
        # would leave them glued onto the id.
        self.itemID = pc_url.split("id=")[1].split("&")[0]
        self.url = "https://detail.m.tmall.com/item.htm?id=" + self.itemID

    def getItemID(self):
        """Return the item id parsed from the PC URL (a digit string)."""
        return self.itemID

    def getCount(self):
        """Fetch the mobile item page and return its sales count as a string.

        Returns:
            The raw value found after `"sellCount":` in the page's scripts,
            or the string 'error' if the request fails or the field is absent.
        """
        headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        try:
            # timeout= prevents hanging forever on an unresponsive host.
            r = requests.get(self.url, headers=headers, timeout=10)
            # r.text is already decoded, so no from_encoding is needed.
            soup = BeautifulSoup(r.text, 'html.parser')
        except requests.RequestException as msg:
            # Network-level failure: DNS error, timeout, connection refused…
            print(msg)
            return 'error'
        # Concatenate every <script> body; join() avoids quadratic "+=".
        content = "  ".join(str(script) for script in soup.findAll("script"))
        try:
            # Grab the value between `"sellCount":` and the following comma.
            return content.split("sellCount\":")[1].split(",")[0]
        except IndexError as msg:
            # "sellCount" did not appear in any script block.
            print(msg)
            return 'error'

        
'''
spider = TBSpider("https://detail.m.tmall.com/item.htm?id=39061029736")
print spider.getCount()
'''
'''
document = open("testfile.txt", "w+");
document.write(content.split("sellCount\":")[1].split(",")[0]);
document.close()
'''