from mymodule.mythread import *
##assert 0
from time import sleep
from re import findall as fa
from requests import get
# Template for the preview API; %s receives the first page number of each
# 6-page batch (the endpoint returns up to 6 pages per call).
url_simple='https://openapi.book118.com/getPreview.html?&project_id=1&aid=157716684&view_token=5@zE0Li2TCx4_Wo7lwMO928AwzlTPuUV&page=%s&callback=jQuery17104229868119150848_1569118074019&_=1569119255155'

# First page number of every batch: 1, 7, 13, ... (document has 768 pages).
list_page_n=list(range(1,769,6))
print(list_page_n)

# One request URL per batch.
list_url=list(map(lambda page: url_simple%page, list_page_n))
print(len(list_url))

# Session cookie and a browser User-Agent so the site serves preview data.
headers={
    'Cookie':'CLIENT_SYS_UN_ID=wKh2GV2GzDefJyN1mXBmAg==; __cfduid=d595856b0373693d47616e8374722744d1569115195; PHPSESSID=hsquj2naqdjttlm98aq4bfqpa3; 94ca48fd8a42333b_weixinattention_getweixindatas=1569115577%2C1',
    'User-Agent':'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'
    }
##a=get(list_url[1],headers=headers)

def split(l,n):
    """Split sequence *l* into roughly *n* consecutive chunks.

    Returns ``n`` chunks of ``len(l)//n`` elements each, plus one extra
    chunk holding the remainder (omitted when it is empty).

    Fixes vs. the original: the loop previously ran to ``len(l)``, so the
    remainder chunk was emitted twice whenever ``len(l) % n != 0``; and
    ``len(l) < n`` produced a zero range step (ValueError).
    """
    # Chunk size is never 0, so short inputs split one element per chunk.
    n_averge=max(1,len(l)//n)
    # Evenly sized chunks stop at n_averge*n so the tail is not duplicated.
    stop=min(n_averge*n,len(l))
    list_return=[l[x:x+n_averge] for x in range(0,stop,n_averge)]
    tail=l[n_averge*n:]
    if tail:
        list_return.append(tail)
    return list_return
##assert 0
##l=split(list_url,6)
# page number (string from the regex) -> image URL; filled by acquire_real_url.
dict_page_url={}
# Batch URLs that exhausted their retries on the first pass.
list_error=[]
# 0 during the first crawl pass; set to 1 before the retry pass so
# acquire_real_url stops appending to list_error.
error_exist=0
def acquire_real_url(list_u):
    """Fetch each batch URL in *list_u* and collect page -> image-URL pairs.

    A response is expected to yield exactly 6 page entries; anything else
    counts as a failed attempt and is retried (up to 10 tries, sleeping
    5 s between tries).  Successful batches are merged into the
    module-level ``dict_page_url``.  URLs that never succeed are recorded
    in ``list_error``, but only on the first pass (``error_exist`` falsy).

    Fixes vs. the original: the retry cap was checked before the success
    check, so a successful 10th attempt was discarded as an error; and a
    failed batch's partial result was still merged into dict_page_url.
    """
    for x in list_u:
        re_rst={}
        success=False
        for _ in range(10):
            cnt=get(x).text
            # Pairs like "12":"...view...png" (page number -> escaped URL).
            re_rst=dict(fa(r'"(\d+)":.+?(view.+?.png)',cnt))
            if len(re_rst)==6:
                success=True
                break
            sleep(5)
        if not success:
            # Only the first pass records errors; the retry pass just
            # reports them.
            if not error_exist:
                list_error.append(x)
            print('Error '+'-'*30)
            # Skip the merge: a failed batch holds an incomplete result.
            continue
        for y in re_rst:
            # Responses escape slashes ("\/"); strip the backslashes and
            # prepend the scheme to get a fetchable URL.
            re_rst[y]='https://'+re_rst[y].replace('\\','')
        dict_page_url.update(re_rst)
        print(len(dict_page_url))
##acquire_real_url(split(list_url,10)[0])
# First crawl pass: spread the batch URLs across 10 worker threads.
a=split(list_url,10)
myThread(acquire_real_url,a)
print('Have passed the first spider!')
for x in range(5):
    print('*'*50)
# Flag the retry pass so acquire_real_url stops recording new errors.
error_exist=1

# Second pass: retry everything that failed the first time.
myThread(acquire_real_url,split(list_error,10))
print('Have dealt with errors')
print('result'+'*'*40)
print(len(dict_page_url))

# Intended to mirror error_exist for the image stage (suppress re-recording
# failures on a retry pass) — NOTE(review): never set truthy anywhere in
# this file; confirm a retry pass for images was intended.
error_image=0
# Running count of images successfully written to disk.
image_n=0
# (page, url) pairs whose download kept failing.
list_image_error=[]

def acquire_image(list_u):
    """Download each (page, url) pair in *list_u* and save it as gif/<page>.gif.

    A response body of 5000 bytes or less is assumed to be an error page
    and is retried (up to 10 tries, sleeping 5 s between tries).  Failures
    are recorded in ``list_image_error`` while ``error_image`` is falsy;
    ``image_n`` counts successful downloads.

    Fixes vs. the original: no longer sleeps 5 s before the final give-up,
    and the output directory is created instead of assumed to exist.
    """
    global image_n
    import os
    # Ensure the output directory exists before any writes.
    os.makedirs('gif',exist_ok=True)
    for x in list_u:
        saved=False
        for retry in range(10):
            cnt=get(x[1]).content
            # Heuristic from the original: real page images exceed 5000
            # bytes; smaller bodies are treated as failed downloads.
            if len(cnt)>5000:
                print('ok')
                with open('gif/'+x[0]+'.gif','wb') as f:
                    f.write(cnt)
                image_n+=1
                saved=True
                break
            # Back off between attempts, but not after the last one.
            if retry<9:
                sleep(5)
        if not saved:
            # Only record failures while the first-pass flag is unset.
            if not error_image:
                list_image_error.append(x)
            print('Error '+'-'*30)
        print(image_n)


# The original sliced the dict directly (TypeError) and passed the chunk
# list straight to acquire_image, which expects (page, url) pairs.  Hand
# each worker thread a chunk of items, mirroring the crawl passes above.
myThread(acquire_image,split(list(dict_page_url.items()),10))

