import requests
from etc.logger import get_logger
from multiprocessing.dummy import Pool as ThreadPool
import time
import os

# Desktop-Chrome User-Agent so LinkedIn serves the normal HTML page
# instead of a bot/challenge response.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'}

# Company-page URL template; %s is filled with a numeric company id.
url = 'http://www.linkedin.com/company/%s'
# Project-local logger (see etc.logger); named after this module's role.
logger = get_logger('getpage')

def get_page(company_id):
    """Fetch the LinkedIn company page for *company_id* and save it to d://pages/.

    Retries indefinitely on network errors and non-200 responses, sleeping
    10s between attempts so failures do not busy-loop. Returns None either
    when the page is stored or when LinkedIn reports the company missing.
    """
    while True:
        try:
            req = requests.get(url % company_id, headers=headers, timeout=10)
            if req.status_code == 200:
                if "We're sorry, but the company you are looking for does not exist." in req.text:
                    logger.debug('%s does not exist', company_id)
                    return
                with open('d://pages/%s.html' % company_id, 'wb') as f:
                    f.write(req.content)
                logger.debug('%s has been stored', company_id)
                return
            # Non-200 (rate limit, login redirect, server error): back off
            # before retrying instead of spinning through the loop.
            logger.warning('%s : unexpected status %s', company_id, req.status_code)
            time.sleep(10)
        except Exception as e:
            logger.warning('%s : %s', company_id, e)
            time.sleep(10)

# work_ids = [i for i in range(2567, 10816030)]


work_ids = [i for i in range(90000, 100000)]

files = [int(file.strip('.html')) for file in os.listdir('d://pages/')]

for i in range(80000, 90000):
    if i not in files:
        work_ids.append(i)

pool = ThreadPool(4)
pool.map(get_page, work_ids)
