import json,re,time,urllib2
import os


def make_dir(path):
    """Create *path* (including any missing parent directories) if absent.

    BUG FIX: the original used os.mkdir, which raises OSError when the
    parent directory does not exist; os.makedirs handles the full chain
    and is a strict superset for existing callers.
    """
    if not os.path.exists(path):
        os.makedirs(path)
def getImgList(url):
    print url
    response = urllib2.urlopen("https://"+url).read()
    print response
    matchObj = re.search(r'topImages:(.*)}', response,re.M|re.I)
    imgs=''
    if matchObj:
        imgStr=matchObj.group(1).strip().strip('[]')
        imgs=imgStr.replace(r'"','')
    return imgs

def saveImg(imgs,dir):
    for imgUrl in imgs :
        print imgUrl
        img=urllib2.urlopen(imgUrl).read()
        img_path=dir+str(time.time())+".jpg"
        f = file(img_path,"wb")
        f.write(img)
    f.close()

def start(index,dir):
    """Walk pages 1..99 of the listing API at *index* and download images.

    For each page, fetches ``index + page_number`` as JSON, then for every
    doc under result.wall.docs resolves its "link" page and saves the
    images found there into *dir*.
    """
    for i in range(1, 100):
        url = index + str(i)
        req = urllib2.Request(url)
        response = urllib2.urlopen(req).read()
        docs = json.loads(response)["result"]["wall"]["docs"]
        for item in docs:
            imgs = getImgList(item["link"])
            # BUG FIX: when a page has no topImages, getImgList returns ''
            # and ''.split(',') yields [''], making saveImg attempt to
            # download the empty URL. Skip such items instead.
            if not imgs:
                continue
            saveImg(imgs.split(','), dir)

if __name__ == "__main__":
    index="http://list.mogujie.com/search?_version=8193&ratio=2%3A3&ad=0&mt=10.851.r29415&_mgjuuid=a25e3b58-7f95-4fd2-9af6-6c649978d893&sort=pop&ptp=1.r5CWT._cate.0.HYdCSIY&_b_key=neiyi_0..3&userId=&showH=330&cKey=15&fcid=50031&width=220&action=neiyi&page="
    dir="D://mj/"
    make_dir(dir)
    start(index,dir)