
#import threading

#class myThread(threading.Thread):
#    def run(self):
#        x += 10
#        print('%s:%d' %(self.name,x))

#class myThread1(threading.Thread):
#    def run(self):
#        x -= 10
#        print('%s:%d' %(self.name,x))
#x = 0
#def main():
#    t = []
#    for i in range(10):
#        t.append(myThread())
#        t.append(myThread1())

#    for tt in t:
#        tt.start()

#if __name__ == '__main__':
#    main()
#    print('结束')





#from urllib.request import urlretrieve
#import time
#import random
#start=time.time()
##f=open('E:\Python\py\web\hh.txt','r')#打开存放URL的文件
##a=f.readlines()
##f.close()
#for i in range(0,100):
#    b=random.randint(0,30)
#    urlretrieve('http://www.chenjinwei.com/test.txt','F:\\PythonDemo\\Crawl_Demo\\Crawl_Demo\\Test3\\m4d\\%d.txt'%i) 
#end=time.time()
#print(end-start)





from urllib.request import urlretrieve
import queue  
import threading   
import random
import time

# Running count of worker threads that have started; doubles as the output
# file sequence number. Shared across threads — mutate only under `lock`.
num = 0
# Re-entrant lock guarding all reads/writes of `num`.
lock = threading.RLock()

class download(threading.Thread):
    """Worker thread: claims one URL from the shared queue and saves it.

    Each worker atomically claims a unique sequence number (used as the
    output filename) while holding ``lock``, so concurrent workers never
    write to the same file.
    """

    def __init__(self, que,
                 dest='F:\\PythonDemo\\Crawl_Demo\\Crawl_Demo\\Test3\\m4d\\%s.txt'):
        """que: queue.Queue of URL strings to fetch.
        dest: %-style template for the output path; receives this
              worker's sequence number (default preserves the original
              hard-coded path).
        """
        threading.Thread.__init__(self)
        self.que = que
        self.dest = dest

    def run(self):
        global num
        # Capture the sequence number while still holding the lock.
        # The original read `num` again AFTER releasing the lock, so two
        # workers could observe the same value and clobber each other's
        # output file.
        with lock:
            num += 1
            my_num = num

        # get_nowait()/Empty instead of the original empty()-then-get():
        # between those two calls another thread could drain the queue,
        # leaving this worker blocked on get() forever.
        try:
            host = self.que.get_nowait()
        except queue.Empty:
            return
        urlretrieve(host, self.dest % my_num)

        
        
def Down(url='http://www.chenjinwei.com/test.txt', count=100):
    """Download *url* *count* times concurrently, one worker per task.

    Fills a queue with ``count`` copies of the URL, starts ``count``
    ``download`` worker threads, and blocks until every worker finishes.
    Defaults reproduce the original hard-coded URL and task count.
    """
    que = queue.Queue()
    for _ in range(count):
        que.put(url)
    # One worker thread per queued task.
    threads = [download(que) for _ in range(count)]
    for t in threads:
        t.start()
    # Wait for all downloads to complete before returning.
    for t in threads:
        t.join()
  
if __name__ == '__main__':
    # Time the whole batch download and report elapsed seconds.
    t0 = time.time()
    Down()
    print(time.time() - t0)