import requests
from etc.logger import get_logger
import logging
from multiprocessing.dummy import Pool as ThreadPool
import time
import os


with open('not_exit')as f:
    not_exit_list = {int(each) for each in f.readlines()}
with open('not_active')as f:
    not_active_list = {int(each) for each in f.readlines()}
with open('finIsh')as f:
    finish_list = {int(each) for each in f.readlines()}

out_path = 'd://pages/'
# finish_list = {int(each.strip('.html')) for each in os.listdir(out_path)}


# for file in os.listdir(out_path):
#     file_path = os.path.join(out_path,file)
#     with open(file_path,encoding = 'utf8') as f:
#         text = f.read()
#     if "We're sorry, but the company you are looking for is not active." in text:
#         os.remove(file_path)
#         not_active_loger.info(file.strip('.html'))
# exit()


def get_recorder_logger(log_name):
    """Return a logger that appends bare messages to a file named *log_name*.

    These loggers record company ids into the same files that are re-read
    into sets on startup, so the format is just the raw message with no
    timestamp or level prefix.
    """
    my_logger = logging.getLogger(log_name)
    my_logger.setLevel(logging.DEBUG)
    # logging.getLogger returns the same object per name, so guard against
    # attaching a duplicate file handler when called twice with one name.
    if not my_logger.handlers:
        fmt = logging.Formatter('%(message)s')
        # was: logging.FileHandler('%s' % log_name) — the formatting was a no-op
        fh = logging.FileHandler(log_name)
        fh.setFormatter(fmt)
        fh.setLevel(logging.DEBUG)
        my_logger.addHandler(fh)
    return my_logger

# File-based recorder loggers: each appends bare company ids to the file of
# the same name, which is re-read into a set at the top of the next run.
NOT_ACTIVE_LOGER = get_recorder_logger('not_active')
NOT_EXIT_LOGER = get_recorder_logger('not_exit')
FINISH_LOGER = get_recorder_logger('finIsh')
# Browser-like User-Agent plus a captured LinkedIn session cookie —
# presumably needed to look like a logged-in browser (verify: session may expire).
HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'}
COOKIES = {'Cookie':'bcookie="v=2&15639eb2-e71e-43d5-8e00-929086b06e77"; bscookie="v=1&2016061912490169890d04-8e37-4cdb-82d3-f5b4a8b0dccfAQHPWbv9oyuwGUBu63JIyReoGGB8xkOg"; JSESSIONID="ajax:3959312458822430371"; li_at=AQEDAR5_SMAEr52xAAABVWtWoI0AAAFVa8R9jU0ATgZHtsX1nqvW_fOa-S3cZkvNTkOVp_OuwyOqyDeGCm6ZXEVpe2pt4FnZ_PA86Hh7xR6RP-owSlfXjGDCP1BNMWKV2RFRvuSQhFFB5FM3Rn6p4wRc; liap=true; lidc="b=VGST05:g=83:u=1:i=1466384687:t=1466470797:s=AQGCuC_yfjyow_xpn7AP9IQiyDvPS3UH"; oz_props_fetch_size1_undefined=undefined; wutan=dL6LxbVv5cMbp/IIptNV2wl02BmDBWp75RQClHnYk2s=; share_setting=PUBLIC; visit="v=1&M"; _ga=GA1.2.1332315777.1466345090; sdsc=1%3A1SZM1shxDNbLt36wZwCgPgvN58iw%3D; _lipt=0_aoD48OvK0DfctE1a1DV5y1H2BJ3OKaJn24gQ6KcH1ud8YBq0mkAl3KWYwLhkiNJ-KK1cEjM1fO0m_PkJuwmvh4-EY-KZiPIQJvv5YylKA5S; lang="v=2&lang=en-us"'}
# Company profile URL template; %s is filled with the numeric company id.
URL = 'http://www.linkedin.com/company/%s'
# Project-wide diagnostic logger (from etc.logger) for progress/errors.
LOGGER = get_logger('getpagenew')

def work_group(company_id):
    """Fetch and persist the page for *company_id*, retrying until it succeeds.

    get_page already retries up to 10 times internally; if it still fails we
    back off before starting another round instead of busy-looping in a tight
    loop (the original retried immediately forever with an empty TODO branch).
    """
    # TODO: rotate proxies here when a whole round of requests fails.
    proxies = None
    while True:
        if get_page(company_id, proxies):
            break
        # get_page exhausted its retries — pause before the next round rather
        # than hammering the site without a delay.
        time.sleep(30)


def get_page(company_id, proxies):
    """Download the LinkedIn company page for *company_id* and record the outcome.

    Returns True once the id is fully classified: the page was saved to
    out_path, or the company is recorded as non-existent / not active.
    Returns False after more than 10 failed attempts so the caller can decide
    what to do next (e.g. switch proxies).
    """
    repeat_num = 0
    while True:
        repeat_num += 1
        try:
            req = requests.get(URL % company_id, headers=HEADERS,
                               cookies=COOKIES, timeout=10, proxies=proxies)
            if req.status_code == 200 and 'LinkedIn' in req.text:
                if "We're sorry, but the company you are looking for does not exist." in req.text:
                    LOGGER.info('%s does not exist' % company_id)
                    NOT_EXIT_LOGER.info(company_id)
                    return True
                if "We're sorry, but the company you are looking for is not active." in req.text:
                    LOGGER.info('%s is not active' % company_id)
                    NOT_ACTIVE_LOGER.info(company_id)
                    return True
                # Active company: store the raw page for later parsing.
                # Use the module-level out_path instead of a duplicated literal.
                with open(os.path.join(out_path, '%s.html' % company_id), 'wb') as f:
                    f.write(req.content)
                LOGGER.info('%s has been stored' % company_id)
                FINISH_LOGER.info(company_id)
                return True
            # Non-200 or a block page without the expected marker — wait and retry.
            LOGGER.error('%s : %s' % (company_id, req.status_code))
            time.sleep(5)
        except Exception as e:
            LOGGER.warning('%s : %s' % (company_id, e))
            # (removed a pointless nested try/except around sleep — time.sleep
            # with a constant argument does not raise under normal operation)
            time.sleep(10)
        if repeat_num > 10:
            return False

def main():
    """Fetch every company page in [1000000, 1100000) not already classified.

    Ids recorded by previous runs (non-existent, not active, or already
    downloaded) are subtracted before the work is fanned out to 15 threads —
    the job is network-bound, so threads suffice despite the GIL.
    """
    work_ids = set(range(1000000, 1100000))
    work_ids -= not_exit_list
    work_ids -= not_active_list
    work_ids -= finish_list

    # Original never closed the pool; close/join so worker threads are
    # reaped even if map raises.
    pool = ThreadPool(15)
    try:
        pool.map(work_group, work_ids)
    finally:
        pool.close()
        pool.join()


if __name__ == '__main__':
    main()
