#coding=utf-8
import multiprocessing
import time

from crawler.nancyTool import *


#获取页面
def get_page(url,path,start,end):
 for i in range(start,end):
  noteAddr="http://jfbra.com/web_page/forum_h5/post.html?post_id="
  noteAddr=noteAddr+str(i)
  #print noteAddr
  response = urllib.urlopen(url+str(i)).read()
  try:
    get_detail(response,path,noteAddr)
  except:
    continue
 print "get_data finished"

# Entry point: fan the id range out across worker processes.
def start(start, end, count, url, dir):
    """Split the post-id range [start, end] across count+1 processes.

    Spawns `count` processes of (end-start)/count ids each, plus one
    remainder process covering the leftover ids through `end` inclusive.

    start -- first post id (inclusive).
    end   -- last post id (inclusive; the remainder worker gets end+1
             as its exclusive bound).
    count -- number of equal-sized chunks.
    url   -- API endpoint prefix, forwarded to get_page.
    dir   -- output directory, forwarded to get_page.
    """
    process_list = []
    chunk = (end - start) / count  # integer floor division under Python 2
    for i in range(count):
        lo = start + chunk * i
        hi = start + chunk * (i + 1)
        process_list.append(
            multiprocessing.Process(target=get_page, args=(url, dir, lo, hi)))
    # Remainder worker starts where the loop left off (start + chunk*count).
    # The original reused the stale loop variable `i` here, which made the
    # remainder range overlap the last chunk and crawl it twice.
    process_list.append(
        multiprocessing.Process(
            target=get_page,
            args=(url, dir, start + chunk * count, end + 1)))
    for process in process_list:
        process.start()

if __name__ == "__main__":
    # Example post page: http://jfbra.com/web_page/forum_h5/post.html?post_id=60264
    url = "http://app.jfbra.com/bra_app/post/get_share_post/"
    # Bug fix: the directory setup was commented out, so the call below
    # passed the *builtin* dir() function as the output path. Recreate the
    # timestamped output directory (make_dir comes from crawler.nancyTool).
    dir = u"D://temp/" + str(time.time()) + "/"
    make_dir(dir)
    start(60000, 80000, 5, url, dir)