# -*- coding=UTF-8 -*-
import requests
import time
import re
from bs4 import BeautifulSoup
from functools import wraps
import pickle
from pymongo.errors import DuplicateKeyError

import threading
from connmongo import MongoConn
from getstockcom import Getstockcom

# Login endpoints for qichacha.com (login itself is currently disabled below;
# the crawler relies on the hard-coded session cookie instead).
# NOTE(review): credentials are hard-coded in source — move to config/env vars.
login_url = "https://www.qichacha.com/user_loginaction"
index_url = "https://www.qichacha.com/"
# NOTE(review): username is an int literal (presumably a phone number) —
# confirm whether the login form expects a string.
username = 15215663921
password = 'a14879'

# One shared HTTP session reused by every request (keeps cookies alive).
session = requests.session()
# Running total of documents written to MongoDB; updated via `global` in the class.
success_num = 0

# Superseded by the copies kept in qichacha.__init__:
# headers = {'Host':'www.qichacha.com','Referer':'https://www.qichacha.com/search_adsearch',
#            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063'
#            ,"Connection":"keep-alive"}
# cookie = {'PHPSESSID':'bpjeamb4dno97oolqnoleapeq4','expires':'Tue, 11-Sep-2018 01:48:52 GMT', 'Max-Age':'604800'}

# Crawler: collects company detail-page links (N listing pages) and parses each page.
# Crawler for qichacha.com: collects company detail URLs, parses each company
# page with BeautifulSoup, and persists the parsed records to MongoDB.
class qichacha():
    def __init__(self):
        """Prepare the request headers and session cookie used by every fetch."""
        # NOTE(review): the PHPSESSID cookie is hard-coded and will expire;
        # refresh it (or perform a real login) before a long crawl.
        self.headers = {'Host': 'www.qichacha.com', 'Referer': 'https://www.qichacha.com/search_adsearch',
                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',
                        "Connection": "keep-alive"}
        self.cookie = {'PHPSESSID': 'bpjeamb4dno97oolqnoleapeq4', 'expires': 'Tue, 11-Sep-2018 01:48:52 GMT', 'Max-Age': '604800'}

    def get_details(self, region, n):
        """Collect company detail links from the first *n* listing pages of *region*.

        Returns a list of absolute company-page URLs (empty if nothing matched).
        """
        urls = ['https://www.qichacha.com/g_{}_{}.html'.format(region, i) for i in range(1, n + 1)]
        result = []
        for url in urls:
            res = session.get(url, headers=self.headers, cookies=self.cookie)
            if res.status_code == 200:
                sp = BeautifulSoup(res.text, features='lxml')
                # Each company is rendered as a <section class="panel panel-default">.
                for panel in sp.find_all('section', 'panel panel-default'):
                    link = panel.find('a')
                    result.append("https://www.qichacha.com/" + link['href'])
        return result

    def geturls(self):
        """Return the detail-page URLs of all listed (IPO) companies."""
        fetcher = Getstockcom(url="https://www.qichacha.com/elib_ipo.shtml", pageno=357)
        rows = fetcher.getall()
        # The last field of each row is the relative detail-page path.
        return ["https://www.qichacha.com" + row[-1] for row in rows]

    def get_table(self, sp, a_col=()):
        """Parse an HTML table tag into {column header: list of column values}.

        sp    -- BeautifulSoup tag holding <tr>/<th>/<td> rows; may be None/falsy.
        a_col -- indices of columns containing an anchor plus a 'btn-touzi'
                 element; those columns yield (text, href, btn_text) tuples
                 instead of plain strings.

        Returns a dict, or None when *sp* is falsy.  May raise AttributeError
        when an expected sub-tag (e.g. 'btn-touzi') is missing — callers rely
        on this to retry without *a_col*.
        """
        if sp:
            rows = sp.find_all('tr')
            result = {}
            if rows:
                info_titles = [th.get_text().strip() for th in rows[0].find_all('th')]
                n = len(info_titles)
                # One accumulator per column.  (The original wrote into
                # locals(); it was only ever accessed as a dict, so a plain
                # list-of-lists is equivalent and far less fragile.)
                columns = [[] for _ in range(n)]
                for row in rows[1:]:
                    cells = row.find_all('td')
                    for i in range(n):
                        if i in a_col:
                            text = cells[i].get_text().replace('>', '').strip()
                            try:
                                href = cells[i].a['href']
                            except TypeError:
                                # cell has no <a>: cells[i].a is None
                                href = ""
                            # .find() returning None raises AttributeError here,
                            # which is the signal callers catch to retry.
                            extra = cells[i].find(attrs={'class': 'btn-touzi'}).get_text().replace('>', '').strip()
                            columns[i].append((text, href, extra))
                        else:
                            columns[i].append(cells[i].get_text(strip=True))
                result = dict(zip(info_titles, columns))
            return result

    def get_content(self, url):
        """Fetch one company detail page and parse it into a dict of sections.

        Returned keys: tel, email (when present), com_name, com_status and
        info0..info8 (overview, basic info, shareholders, beneficial owners,
        key personnel, investments, holdings, change log, top-ten shareholders).
        """
        print("正在爬取的公司Url为：---%s" % url)
        res = session.get(url, headers=self.headers, cookies=self.cookie)
        result = {}
        result['info0'], result['info1'], result['info2'], result['info3'], result['info4'] = {}, {}, {}, {}, {}
        if res.status_code == 200:
            sp = BeautifulSoup(res.text, features='lxml')
            # Phone number: two page layouts exist, try both.
            try:
                tel = sp.find(attrs={'class': 'clearfix font-15 m-b-sm'}).find(attrs={'class': 'pull-left'}).get_text()
            except AttributeError:
                try:
                    tel = sp.find(attrs={'class': 'PhoneNumber'}).get_text()
                except AttributeError:
                    tel = None
            result['tel'] = tel
            # Email hides in a hidden input near the "to_company_email" marker.
            for hidden in sp.find_all(attrs={'type': 'hidden'}):
                if str(hidden).find("to_company_email") > 0:
                    # raw string: original pattern was un-prefixed ('\.' / '\w')
                    m = re.search(r'[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+\.\w*', str(hidden))
                    if m:
                        result['email'] = m.group()

            # Overview block: company name, status, and label/value rows.
            info0 = sp.find(attrs={'class': 'content'})
            if info0:
                title = info0.find(attrs={'class': 'row title'})
                try:
                    result['com_name'] = title.h1.get_text().strip()
                except AttributeError:
                    result['com_name'] = title.get_text(strip=True)
                try:
                    result['com_status'] = title.find(attrs={'class': 'nstatus'}).get_text().strip()
                except AttributeError:
                    result['com_status'] = None
                keys_all, vals_all = [], []
                for row in info0.find_all(attrs={'class': 'row'}):
                    keys_all.extend(k.get_text().strip() for k in row.find_all(attrs={'class': 'cdes'}))
                    vals_all.extend(v.get_text().strip() for v in row.find_all(attrs={'class': 'cvlu'}))
                result['info0'] = dict(zip(keys_all, vals_all))

            com_info = sp.find(id='Cominfo')        # basic company info
            stock_info = sp.find(id='Sockinfo')     # shareholders
            syr_list = sp.find(id='syrlist')        # ultimate beneficial owners
            mainmember = sp.find(id='Mainmember')   # key personnel
            touzilist = sp.find(id='touzilist')     # outbound investments
            holdcolist = sp.find(id='holdcolist')   # holding companies
            changelist = sp.find(id='Changelist')   # change records
            infodates = ['2018-06-30', '2018-03-31', '2017-12-31', '2017-09-30', '2017-06-30']
            # Basic info: flat <td> list of alternating label / value cells.
            info_val = [td.get_text().strip().replace('：', '') for td in com_info.find_all('td')]
            # step-2 range also guards an odd cell count (original indexed i+1 unchecked)
            result['info1'] = {info_val[i]: info_val[i + 1] for i in range(0, len(info_val) - 1, 2)}
            # Shareholders: column 1 usually carries an anchor; fall back to
            # plain parsing when this page variant lacks the anchor markup.
            try:
                result['info2'] = self.get_table(stock_info, [1])
            except AttributeError:
                result['info2'] = self.get_table(stock_info)
            # Ultimate beneficial owners
            result['info3'] = self.get_table(syr_list)
            # Key personnel
            try:
                result['info4'] = self.get_table(mainmember, [1])
            except AttributeError:
                result['info4'] = self.get_table(mainmember)
            # Outbound investments
            try:
                result['info5'] = self.get_table(touzilist, [1])
            except AttributeError:
                result['info5'] = self.get_table(touzilist)
            # Holding companies
            result['info6'] = self.get_table(holdcolist)
            # Change records
            result['info7'] = self.get_table(changelist)
            # Top-ten shareholders: one table per reporting date.
            result['info8'] = [dict(key=d, value=self.get_table(sp.find(id=d))) for d in infodates]
        return result

    def get_all_by_region(self, region, n):
        """Crawl every company found on *n* listing pages of *region*."""
        result = []
        for url in self.get_details(region, n):
            print("url:", url)
            result.append(self.get_content(url))
            time.sleep(5)   # throttle to avoid the anti-crawler ban
        return result

    def save_data(self, res):
        """Insert one dict or a list of dicts into the 'qichacha' collection.

        Returns the number of documents inserted; duplicates are skipped.
        """
        assert isinstance(res, (dict, list)), '参数必须为字典或列表形式'
        conn = MongoConn()
        success_num = 0   # local counter; deliberately shadows the module global
        res2 = [res] if isinstance(res, dict) else res
        if conn:
            for it in res2:
                try:
                    # BUG FIX: the original inserted the whole `res` once per
                    # item, duplicating data when a list was passed.
                    conn.db['qichacha'].insert_one(it)
                    success_num += 1
                except DuplicateKeyError:
                    pass
                except Exception as e:
                    print(str(e))
        return success_num

    def get_all_by_comurl(self, urls=None):
        """Crawl company pages, skipping URLs already listed in crawed.txt.

        When *urls* is None the full listed-company set from geturls() is used.
        Each successfully stored URL is appended to crawed.txt so the crawl can
        resume.  Returns the global running total of stored documents.
        """
        global success_num
        if not urls:
            urls = self.geturls()
        # BUG FIX: the original only crawled inside the `else` branch, so a
        # call without *urls* fetched the URL list and then did nothing.
        try:
            with open("crawed.txt", 'r') as f:
                crawed_url = f.read().split(',')
                print(crawed_url)
        except FileNotFoundError:
            crawed_url = []   # first run: nothing crawled yet
        uncrawed_url = set(urls).difference(set(crawed_url))
        print("uncrawed_url", uncrawed_url)
        if uncrawed_url:
            for url in uncrawed_url:
                print("url:", url)
                res = self.get_content(url)
                sucflag = self.save_data(res)
                if sucflag > 0:
                    success_num += sucflag
                    # Record progress only after a successful insert.
                    with open("crawed.txt", 'a') as f:
                        f.write(url + ',')
                time.sleep(3)
                print("共存入数据库%s条数据" % success_num)
        return success_num

    def getres(self, inlst):
        """Convenience entry point: crawl *inlst* and return the stored count."""
        return self.get_all_by_comurl(urls=inlst)

    def save_comurls(self, comurls):
        """Pickle the collected company URLs to comurls.pkl."""
        with open("comurls.pkl", 'wb') as f:
            res = pickle.dump(comurls, f)
        # BUG FIX: pickle.dump returns None, so the original success message
        # was dead code; report success once the with-block exits cleanly.
        print("保存成功!")
        return res

    def read_comurls(self, file='comurls.pkl'):
        """Load and return the pickled company URL list."""
        with open(file, 'rb') as f:
            return pickle.load(f)



def _main():
    """Script entry point: resume crawling listed companies from index 3000."""
    crawler = qichacha()
    # To rebuild the cached URL list first:
    # urls = crawler.geturls()
    # crawler.save_comurls(urls)
    comurls = crawler.read_comurls()
    crawler.getres(comurls[3000:])
    print("程序执行完毕！")


# Guarded so importing this module no longer kicks off a multi-hour crawl.
if __name__ == "__main__":
    _main()

# Spawn multiple worker threads (disabled)
# comurls = geturls()

# threads = []
# nums = int(len(comurls)/70) + 1
#
# for i in range(nums):
#     t = threading.Thread(target=getres,args=(comurls[i*70:(i+1)*70],))
#     threads.append(t)
#
# for i in range(nums):
#     threads[i].start()
# for i in range(nums):
#     threads[i].join()
# getres(comurls[400:410])

# print("程序执行完毕！")

