from selenium.common.exceptions import TimeoutException
import time
import random
from selenium import webdriver
import selenium
from collections import OrderedDict
import platform
import logging
import inspect
import bpython
import pymongo
import random
import pickle
import urllib
from pprint import pprint

# MongoDB collection used as a crawl "done" marker store: every finished
# (keyword, location, experience-level) combination is inserted by _id so a
# restarted run can skip it.
TB = pymongo.MongoClient().linkedin_new.keyrange

# TB = pymongo.MongoClient('192.168.1.220',29001).linkedin.keyrange

# Path to the chromedriver binary: Windows-style default, overridden on Linux.
Driver_path = './chromedriver.exe'
if platform.system() == 'Linux':
    Driver_path = './chromedriver'


# ai_kw = []

# AI-related search keywords, Chinese and English.  Stored as a set, so the
# few duplicated entries in the literal below (e.g. 'NLP', 'Visual Search')
# are harmlessly collapsed.
# 'Robot intelligent','Self Driving Automotive','Autonomous car','robo-advisor',
ai_kw = set(['人工智能','机器学习','深度学习','强化学习','智能机器人','聊天机器人','虚拟助理','计算机视觉','图像识别','人脸识别','文字识别','可视化搜索','指纹识别','虹膜识别','手势控制','自然语言理解','自然语言处理','文本分析','语音识别','语义网络','语义搜索','无人车','无人驾驶','自动驾驶','辅助驾驶','自动泊车','智能投顾','神经计算','神经网络','算法博弈论','知识工程','知识表示','知识图','逻辑编程','规则基系统','规则引擎','进化计算','认知计算','计算智能','超级智能','自动推理','模糊逻辑','智能代理','感知计算','计算博弈论','基于规则系统','智能主体','知觉计算','基于规则的系统','智能体','Machine learning','Deep learning','Virtual Assistant','Computer Vision','Image Recognition','Visual Search','Text Analytics','Artificial intelligence','self driving car','Automatic parking','NLP','TEXT ANALYSIS','self driving vehicle','Knowledge engineering','Rule based system','driver assistance system','Reinforcement learning','Robots','Chatterbot','Facial Recognition','text recognition','Visual Search','fingerprint recognition','Iris recognition','Gesture control','Natural language understanding','Natural language processing','Voice Recognition','Semantic Web','Semantic Search','Autonomous vehicle','autopilot','ADAS','Neurocomputing','Neural network','Robotics','chat bot','face recognition','finger print recognition','NLP','TEXT ANALYSIS','Autonomous car','auto pilot','robo-advisor','Neurocomput','Robotic','auto-pilot','robo adviser','Algorithmic game theory','Knowledge engineering','Knowledge representation','Knowledge graph','Logic programming','Rule engine','Evolutionary computation','Cognitive computing','Computational intelligence','Superintelligence','Automated reasoning','Fuzzy logic','Intelligent agent','Perceptual computing','Perceptual comput',])


# FinTech / IoT search keywords, crawled by main().  Entries of the form
# 'a ; b' presumably encode two terms that must co-occur in the search --
# TODO confirm how the query builder splits on ' ; ' (not visible here).
FT_KEY=['p2p ; loan', 'p2p ; lend', 'p2p ; borrow', 'p2p ; finance', 'person to person ; loan', 'person to person ; lend', 'person to person ; borrow', 'person to person ; finance', 'peer to peer ; loan', 'peer to peer ; lend', 'peer to peer ; borrow', 'peer to peer ; finance', 'network ; loan', 'network ; lend', 'network ; borrow', 'network ; finance', 'online ; loan', 'online ; lend', 'online ; borrow', 'online ; finance', 'internet ; loan', 'internet ; lend', 'internet ; borrow', 'internet ; finance', 'mobil ; loan', 'mobil ; lend', 'mobil ; borrow', 'mobil ; finance',
'bitcoin ','bit coin ','block chain ','digital currency ','crypto currency ','virtual currency','Ethereum','Smart Contract ',
'Crowdfunding','Crowdsourcing',
'NETWORK ; Insurance','ONLINE ; Insurance','INTERNET ; Insurance','MOBILE ; Insurance','Insur tech','Insurance Tech',
'robo adviser','wealth assistant','invest assistant','invest advisor','Wealth Management',
'Financial Technology','IoT','Internet of Things',
'Sensor ; NETWORK','Sensor ; ONLINE','Sensor ; INTERNET','Sensor ; MOBILE','Sensor ; CLOUD',
'Positioning ; NETWORK','Positioning ; ONLINE','Positioning ; INTERNET','Positioning ; MOBILE','Positioning ; CLOUD',
'2 dimensional barcode','two dimensional barcode','Low power bluetooth','zigbee','Low power wifi','mifi','NFC',
'RFID','Internet of Vehicles','Smart Intelligent ; power grid','Smart Intelligent ; city','Smart Intelligent ; construction','Smart Intelligent ; manufacture','Smart Intelligent ; logistics','Smart Intelligent ; camera','Smart Intelligent ; speaker','Smart Intelligent ; television','Smart Intelligent ; fridge','Smart Intelligent ; refrigerator','Smart Intelligent ; washing machine','Smart Intelligent ; transit','Smart Intelligent ; appliance',
'wearable','smart watch','smart ring','smart glass'
]
# 'self-drive',

# setInterval("jQuery('.next').click()",5000); 

class Cookie_Handle:
    """Manage a pool of Chrome browsers that share cached LinkedIn cookies.

    Cookies are pickled to ``cookie.cache`` between runs; when the cache is
    missing or holds fewer cookie sets than browsers, the operator logs in
    manually and the fresh cookies are saved back.
    """

    def __init__(self, brower_count=1):
        # NOTE: the historical parameter-name typo ("brower") is kept so
        # existing keyword callers are not broken.
        self.flag = 0  # round-robin counter used to pick the next browser
        self.browsers = [self.init_browser(i) for i in range(brower_count)]
        self.handle_cookie()

    def init_browser(self, count):
        """Start one Chrome instance behind the local proxy and tile it.

        *count* is the browser's index, used only to cascade the windows
        so all of them stay visible on screen.
        """
        chromeOptions = webdriver.ChromeOptions()
        chromeOptions.add_argument('proxy-server=http://localhost:8080')
        chromeOptions.add_argument('ignore-certificate-errors')
        # chromeOptions.add_argument('test-type')
        # chromeOptions.add_argument('disable-images')
        # chromeOptions.add_argument('user-data-dir=./chrome')
        # chromeOptions.add_argument('disable-gpu')
        # chromeOptions.add_argument('headless')
        # chromeOptions.add_argument('user-agent=%s'%random.choice(USER_AGENTS))

        browser = webdriver.Chrome(executable_path=Driver_path, chrome_options=chromeOptions)
        browser.set_page_load_timeout(300)
        browser.set_window_size(320, 800)
        browser.set_window_position(180 * count, 50 * count)

        return browser

    def handle_cookie(self):
        """Replay cached cookies into every browser, then re-save them."""
        cookies = []
        try:
            with open('cookie.cache', 'rb') as f:
                cookies = pickle.load(f)
        except (OSError, pickle.PickleError, EOFError):
            # Missing or corrupt cache: start without cookies and rely on
            # the manual login below.
            pass

        for browser in self.browsers:
            browser.get('https://www.linkedin.com/')
            if cookies:
                # Each cached entry is one browser's full cookie list.
                for ck in cookies.pop():
                    browser.add_cookie(ck)
                browser.refresh()
        input("please login:\n")

        for browser in self.browsers:
            try:
                cookies.append(browser.get_cookies())
            except Exception as e:
                # Best effort: one dead browser must not lose the others'
                # cookies, so log and continue.
                print(e)

        with open('cookie.cache', 'wb') as f:
            pickle.dump(cookies, f)

    def get_page(self, url):
        """Load *url* in the next browser (round robin) and return the
        result count displayed on the page.

        Falls back to 100 when the count element is absent or unparsable;
        re-raises navigation errors after advancing the round-robin index
        so the next call uses a different browser.
        """
        self.flag += 1
        browser = self.browsers[self.flag % len(self.browsers)]
        try:
            browser.get(url)
            time.sleep(1)
        except Exception as e:
            self.flag += 1  # skip this browser on the next call too
            raise e
        time.sleep(2)
        try:
            text = browser.find_elements_by_css_selector('.results-count-string')[0].text
            # e.g. "Showing 1,234 results" -> 1234.  Strip the plural form
            # before the singular so " results" is not left half-replaced.
            num = int(text.replace('Showing ', '')
                          .replace(' results', '')
                          .replace(' result', '')
                          .replace(',', ''))
        except Exception:
            # No count element, or unexpected text: assume one full page
            # plus change so the caller still paginates.
            print('no num')
            num = 100
        return num

    def exh_page(self, num):
        """Exhaust the current result list by clicking '.next' repeatedly.

        Returns True once the 'start' offset in the URL is within one page
        (< 26) of *num*; False if 200 clicks were not enough.
        """
        browser = self.browsers[self.flag % len(self.browsers)]
        for _ in range(200):
            browser.execute_script("$(window).scrollTop(9000);$('.next').click()")
            time.sleep(2)
            if 'start' in browser.current_url and num - int(browser.current_url.split('=')[-1]) < 26:
                return True
        return False


def get_log(name):
    """Return a DEBUG-level logger writing to both '<name>.log' and stderr.

    Idempotent: a repeated call with the same *name* reuses the logger's
    existing handlers instead of attaching duplicates (which would emit
    every record twice per extra call).
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    if not log.handlers:  # guard against duplicate handlers on repeat calls
        fmt = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')

        fh = logging.FileHandler('%s.log' % name)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(fmt)
        log.addHandler(fh)

        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(fmt)
        log.addHandler(sh)

    return log

# Shared logger for crawl failures (see main()).
CM_LOG = get_log('chrome')



class Pages:
    """Builds LinkedIn job-search URLs, narrowing an over-large query.

    get_url() returns [url, flag] pairs, where *flag* is the dedup key
    recorded in the TB Mongo collection once that URL has been crawled.
    Drill-down order: worldwide -> per experience level -> per country ->
    per US state -> per city.
    """

    # Experience-level filter values (f_E query parameter), in crawl order.
    El = [2,3,4,0,6,1,5]

    # Country locationId values; '%3A' is a URL-encoded ':'.
    Countrys = ['{x}%3A0'.format(x=x) for x in ('fr','de','gb','ca','au','jp','kr','ru','ch','nl','es','sg','il','in','cn','us')]

    # Country -> state locationIds.  Only the US is mapped; other countries
    # cannot be narrowed past the country level.
    Countrys_map = {'us%3A0':['STATES.us.{x}'.format(x=x) for x in ('al','ak','az','ar','ca','co','ct','de','fl','ga','hi','id','il','in','ia','ks','ky','la','me','md','ma','mi','mn','ms','mo','mt','ne','nv','nh','nj','nm','ny','nc','nd','oh','ok','or','pa','ri','sc','sd','tn','tx','ut','vt','va','wa','wv','wi','wy')],
    }

    # State locationId -> city 'location' strings.  Only CA and WA mapped.
    # NOTE(review): 'Kirkland,' below carries a stray trailing comma --
    # confirm the intended location string.
    States_map = {'STATES.us.ca':['%s, California'%x for x in ('Alameda','Alpine','Amador City','Butte City','Colusa','Irvine','El Dorado','Fresno','Glenn','Imperial','Los Angeles','Madera','Marin','Mariposa','Mendocino','Merced','Monterey','Napa','Nevada City','Orange','Riverside','Sacramento','San Diego','San Francisco','San Joaquin','San Luis Obispo','San Mateo','Santa Barbara','Santa Clara','Santa Cruz','Shasta','Sutter','Tulare','Tuolumne','Yolo','Yuba City',)],
        'STATES.us.wa':['%s, Washington'%x for x in ('Seattle','Redmond','Bellevue','Kirkland,','Issaquah','Spokane')]

    }

    # LocationidTree = {
    #     'OTHERS.worldwide':['{x}%3A0'.format(x=x) for x in ('fr','de','gb','ca','au','jp','kr','ru','ch','nl','es','sg','il','in','cn','us')],
    #     'us%3A0':['STATES.us.{x}'.format(x=x) for x in ('al','ak','az','ar','ca','co','ct','de','fl','ga','hi','id','il','in','ia','ks','ky','la','me','md','ma','mi','mn','ms','mo','mt','ne','nv','nh','nj','nm','ny','nc','nd','oh','ok','or','pa','ri','sc','sd','tn','tx','ut','vt','va','wa','wv','wi','wy')]
    #    }

    # LocationTree = {
    #    'STATES.us.ca':['%s, California'%x for x in ('Alameda','Alpine','Amador City','Butte City','Colusa','Irvine','El Dorado','Fresno','Glenn','Imperial','Los Angeles','Madera','Marin','Mariposa','Mendocino','Merced','Monterey','Napa','Nevada City','Orange','Riverside','Sacramento','San Diego','San Francisco','San Joaquin','San Luis Obispo','San Mateo','Santa Barbara','Santa Clara','Santa Cruz','Shasta','Sutter','Tulare','Tuolumne','Yolo','Yuba City',)],
    #     'STATES.us.wa':['%s, Washington'%x for x in ('Seattle','Redmond','Bellevue','Kirkland,','Issaquah','Spokane')]    
    # }

    def get_url(self,kw,arg_url=None):
        """Return un-crawled [url, flag] pairs for keyword *kw*.

        With no *arg_url*: a single worldwide search URL (or [] when TB
        already holds its flag).  With an *arg_url* whose result count was
        too large: the URL is split into the next-narrower set of searches.
        Returns None when no further narrowing exists for *arg_url*, and
        an empty list when every narrower search is already recorded in TB.
        """
        if not arg_url:
            flag = '%s'%kw

            if not TB.find_one({'_id':flag}):
                url = 'https://www.linkedin.com/jobs/search/?keywords={kw}&locationId=OTHERS.worldwide&sortBy=DD'
                return [[url.format_map({'kw':kw}),flag],]
            else:
                return []

        # Re-parse the query string of the URL being narrowed.
        ups = urllib.parse.urlparse(arg_url)
        args_dic = {}
        for kv in ups.query.split('&'):
            args_dic[kv.split('=')[0]] = kv.split('=')[1]

        ret_urls = []
        if 'locationId' in args_dic:
            _locationId = args_dic['locationId']

            # Worldwide: with f_E, expand to one URL per country; without,
            # expand to one URL per experience level first.
            if _locationId == 'OTHERS.worldwide':
                if 'f_E' in args_dic:
                    _f_E = args_dic['f_E']
                    url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={kw}&locationId={locate_id}&sortBy=DD'
                    for country in Pages.Countrys:
                        flag = '%s,%s,%s'%(kw,country,_f_E)
                        if not TB.find_one({'_id':flag}):
                            ret_urls.append([url.format_map({'f_E':_f_E,'locate_id':country,'kw':kw}), flag])
                    return ret_urls
                else:
                    url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={keywords}&locationId={locationId}&sortBy=DD'
                    for el in Pages.El:
                        flag = '%s,%s,%s'%(kw,_locationId,el)
                        if not TB.find_one({'_id':flag}):
                            ret_urls.append([url.format_map({'f_E':el,'locationId':args_dic['locationId'],'keywords':kw}),flag])
                    return ret_urls

            # Country level: expand to states (US only); None otherwise.
            if _locationId in Pages.Countrys:

                if 'f_E' in args_dic:
                    url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={kw}&locationId={locate_id}&sortBy=DD'
                    _f_E = args_dic['f_E']
                    if _locationId in Pages.Countrys_map:
                        for states in Pages.Countrys_map[_locationId]:
                            flag = '%s,%s,%s'%(kw,states,_f_E)
                            if not TB.find_one({'_id':flag}):
                                ret_urls.append([url.format_map({'f_E':_f_E,'locate_id':states,'kw':kw}), flag])
                        return ret_urls
                    else:
                        return None
                # else:
                #     url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={keywords}&locationId={locationId}&sortBy=DD'
                #     for el in Pages.El:
                #         flag = '%s,%s,%s'%(kw,_locationId,el)
                #         if not TB.find_one({'_id':flag}):
                #             ret_urls.append([url.format_map({'f_E':el,'locationId':args_dic['locationId'],'keywords':kw}),flag])
                #     return ret_urls
            elif _locationId in Pages.States_map:
                # State level: expand to cities via the 'location' parameter.
                # NOTE(review): assumes 'f_E' is present in args_dic (raises
                # KeyError otherwise) -- confirm callers always include it.
                _f_E = args_dic['f_E']
                url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={kw}&location={location}&sortBy=DD'
                for locate in Pages.States_map[_locationId]:
                    flag = '%s,%s,%s,%s'%(kw,_locationId,locate,_f_E)
                    if not TB.find_one({'_id':flag}):
                        ret_urls.append([url.format_map({'location':locate,'f_E':_f_E,'kw':kw}), flag])
                return ret_urls
        # Falls through (returns None) when locationId is absent/unknown.

# def get_url(self,kw,arg_url=None):
#         if not arg_url:
#             url = 'https://www.linkedin.com/jobs/search/?keywords={kw}&locationId=OTHERS.worldwide&sortBy=DD'
#             return [[url.format_map({'kw':kw}),'%s'%kw],]
#         ups = urllib.parse.urlparse(arg_url)
#         args_dic = {}
#         for kv in ups.query.split('&'):
#             args_dic[kv.split('=')[0]] = kv.split('=')[1]

#         if 'locationId' in args_dic:
#             _locationId = args_dic['locationId']


#             if 'location' in args_dic:
#                 _location = args_dic['location']

#                 if 'f_E' in args_dic:
#                     return None
#                 else:
#                     url = 'https://www.linkedin.com/jobs/search/?keywords={keywords}&f_E={f_E}&location={location}&locationId={locationId}&sortBy=DD'
#                     ret_urls = []
#                     for el in Pages.El:
#                         flag = '%s,%s,%s,%s'%(kw,_locationId,_location,el)
#                         if not TB.find_one({'_id':flag,'unfull':None}):
#                             ret_urls.append([url.format_map({'f_E':el,'locationId':args_dic['locationId'],'location':args_dic['location'],'keywords':kw}),flag])
#                     return ret_urls
#             else:
#                 ret_urls = []
#                 if _locationId in Pages.LocationidTree:
#                     url = 'https://www.linkedin.com/jobs/search/?keywords={kw}&locationId={locate_id}&sortBy=DD'
#                     locates = Pages.LocationidTree[_locationId]
#                     for locate in locates:
#                         flag = '%s,%s'%(kw,locate)
#                         if not TB.find_one({'_id':flag,'unfull':None}):
#                             ret_urls.append([url.format_map({'locate_id':locate,'kw':kw}), flag])
#                     return ret_urls
#                 if _locationId in Pages.LocationTree:
#                     url = 'https://www.linkedin.com/jobs/search/?keywords={kw}&location={locate}&locationId={locate_id}&sortBy=DD'
#                     locates = Pages.LocationTree[_locationId]
#                     for locate in locates:
#                         flag = '%s,%s,%s'%(kw,_locationId,locate)
#                         if not TB.find_one({'_id':flag,'unfull':None}):
#                          ret_urls.append([url.format_map({'locate':locate,'locate_id':args_dic['locationId'],'kw':kw}), flag])
#                     return ret_urls

#                 if 'f_E' in args_dic:
#                     return None
#                 else:
#                     ret_urls = []
#                     url = 'https://www.linkedin.com/jobs/search/?f_E={f_E}&keywords={keywords}&locationId={locationId}&sortBy=DD'
#                     for el in Pages.El:
#                         flag = '%s,%s,%s'%(kw,_locationId,el)
#                         if not TB.find_one({'_id':flag,'unfull':None}):
#                             ret_urls.append([url.format_map({'f_E':el,'locationId':args_dic['locationId'],'keywords':kw}),flag])
#                     return ret_urls


def test():
    """Smoke-check Pages.get_url URL expansion for a dummy keyword."""
    pages = Pages()

    # Seed query: no URL argument -> single worldwide search URL.
    ret = pages.get_url('cc')
    # Narrowing step: worldwide URL expands per experience level.
    ret = pages.get_url(
        'cc',
        'https://www.linkedin.com/jobs/search/?keywords=cc&locationId=OTHERS.worldwide&sortBy=DD',
    )

    pprint(ret)


# local = inspect.currentframe().f_locals
# bpython.embed(local)

import arrow

def main():
    """Crawl LinkedIn job-search result counts for every FT_KEY keyword.

    Each keyword starts as a worldwide search; whenever a page reports
    more results than LinkedIn will paginate (~1000), the search is split
    into narrower URLs via Pages.get_url().  Every finished (keyword,
    location, level) combination is inserted into TB so a restarted run
    skips it (DuplicateKeyError == already done by another run).
    """
    browser = Cookie_Handle(8)

    # refresh the page
    # browser.refresh()
    # country = USA()
    page = Pages()


    for kw in FT_KEY:

        urls = page.get_url(kw)

        while urls:
            url, flag = urls.pop()
            # Retry the same URL until it either succeeds or is known-done.
            while 1:
                try:
                    print(flag)
                    num = browser.get_page(url)
                    dt = arrow.now().floor('day').datetime
                    if num<26:
                        # Single page of results: nothing to paginate.
                        TB.insert_one({'_id':flag,'update':dt,'status':True,'count':num,})

                    if 25<num <1001:
                        # do job
                        # Paginate the full result list, then mark done.
                        if browser.exh_page(num):
                            TB.insert_one({'_id':flag,'update':dt,'status':True,'count':num,})

                    # NOTE(review): counts in 1001..1025 match none of the
                    # three branches and are never recorded -- confirm intended.
                    if num > 1025:
                        more_urls = page.get_url(kw,url)
                        if more_urls:
                            # Narrower searches exist: queue them instead.
                            urls.extend(more_urls)
                        elif isinstance(more_urls,list):
                            # Empty list: every narrower search already done.
                            pass
                        else:
                            # None: cannot narrow further; crawl the first
                            # 1000 and record the shortfall (status False).
                            if browser.exh_page(1000):
                                TB.update_one({'_id':flag},{'$set':{'status':False,'count':num,'update':dt}},upsert=True)
                                CM_LOG.error(flag)
                    break
                except pymongo.errors.DuplicateKeyError as e:
                    # Flag already recorded (e.g. by a concurrent run).
                    print(e)
                    print(url)
                    break
                except Exception as e:
                    # Transient failure: log and retry the same URL.
                    print(e)
                    logging.exception(e)


    print('over')
    # Keep the browsers open for a while after the crawl finishes.
    time.sleep(300)
if __name__ == '__main__':

    # Swap main() for test() to dry-run Pages.get_url without crawling.
    try:
        main()
        # test()
    except selenium.common.exceptions.WebDriverException as e:
        print(e)