import os
import shutil
from pymongo import MongoClient
from bs4 import BeautifulSoup
import json
from multiprocessing.dummy import Pool


# --- One-time setup: output directories, constants, and the DB handle ---

# Create destination folders up front so shutil.move() in work() never
# fails on a missing target.  makedirs(exist_ok=True) replaces the
# check-then-create pattern, which was racy (TOCTOU) under concurrency.
os.makedirs('success_companys', exist_ok=True)
os.makedirs('fail_companys', exist_ok=True)

# Directory holding the raw saved HTML pages to process.
PAGE_PATH = r'C:\Users\ider\Desktop\temp\pages'

# Snapshot of the files to process, taken once at startup.
FILES = os.listdir(PAGE_PATH)
# Parsers tried in order; the one yielding the most <code> tags wins.
PARSERS = ['lxml', 'html.parser', 'html5lib']
SUCCESSPATH = 'success_companys'
FAILPATH = 'fail_companys'
# Target collection; MongoClient connects lazily, so no network I/O here.
CONNECTION = MongoClient('192.168.0.220', 27017)['linkedin_page']['companys']


def de_dic(dic_tmp):
    '''
    Remove every key whose value is falsy (empty string, None, 0, empty
    container, ...) from *dic_tmp*, in place.

    :param dic_tmp: dictionary to prune
    :return: None (the dict is modified in place)
    '''
    # Collect the doomed keys first so we never mutate the dict
    # while iterating over it.
    empty_keys = [key for key, value in dic_tmp.items() if not value]
    for key in empty_keys:
        del dic_tmp[key]


def work(file):
    '''
    Parse one saved LinkedIn company page and store its merged JSON
    payload in MongoDB.

    The page is parsed with every parser in PARSERS and the soup that
    yields the most <code> tags wins; pages producing fewer than 5 such
    tags (or no parseable payload at all) are treated as failed captures
    and moved to FAILPATH, everything else goes to SUCCESSPATH after a
    successful insert.

    :param file: file name (relative to PAGE_PATH) of the saved page
    :return: None
    '''
    src = os.path.join(PAGE_PATH, file)
    # Read the page once instead of reopening it for every parser, and
    # close the handle deterministically (the original leaked 3 handles).
    with open(src, encoding='utf8') as fp:
        html = fp.read()

    # Try each parser and remember how many <code> tags it found.
    leng_lis = []
    soup_lis = []
    for parser in PARSERS:
        soup = BeautifulSoup(html, parser)
        leng_lis.append(len(soup.find_all('code')))
        soup_lis.append(soup)

    length = max(leng_lis)
    if length < 5:
        # Too few <code> blocks: treat as a failed page capture.
        shutil.move(src, os.path.join(FAILPATH, file))
        return
    soup = soup_lis[leng_lis.index(length)]

    dic_s = []
    for code in soup.find_all('code'):
        # code.string is None for tags with nested markup; the original
        # crashed with AttributeError here -- skip those tags instead.
        if code.string and code.string.strip():
            dic_tmp = json.loads(code.string)
            de_dic(dic_tmp)
            dic_s.append(dic_tmp)

    if not dic_s:
        # Enough <code> tags but none held a JSON payload; don't let
        # dic_s[0] below raise IndexError -- file the page as failed.
        shutil.move(src, os.path.join(FAILPATH, file))
        return

    # Merge all payloads into the first dict (later keys win).
    merged = dic_s[0]
    for extra in dic_s[1:]:
        merged.update(extra)
    # pop() with a default instead of `del`, which raised KeyError for
    # pages without a trackingUuid field.
    merged.pop('trackingUuid', None)
    CONNECTION.insert_one(merged)
    shutil.move(src, os.path.join(SUCCESSPATH, file))


def main():
    '''
    Process every file in FILES on a 10-thread pool.

    multiprocessing.dummy.Pool is a thread pool, which suits this
    I/O-bound workload (file reads, DB inserts, file moves).
    '''
    # Context manager closes and joins the pool on exit; the original
    # never closed it, leaving worker threads behind.
    with Pool(10) as pool:
        pool.map(work, FILES)

# Script entry point: run the whole pipeline when executed directly.
if __name__ == '__main__':
    main()