# -*- coding: utf-8 -*-
# author : li shi jie
# Email : yr785339493@qq.com

import requests
from urllib.parse import urlencode
requests.packages.urllib3.disable_warnings()
from bs4 import BeautifulSoup
import re
from sql import Mysql
import pymysql

class Wx_spider:
    """Scrape WeChat public-account search results from data.xiguaji.com
    and persist each account record into a local MySQL database."""

    def __init__(self):
        # Headers copied from a logged-in browser session. The Cookie value
        # carries the site login state, so requests will start failing once
        # the session expires — NOTE(review): consider refreshing it.
        self.header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Cookie': 'mediav=%7B%22eid%22%3A%22163230%22%2C%22ep%22%3A%22%22%2C%22vid%22%3A%227ZCRIS8CG9%3C--eRV46jr%22%2C%22ctn%22%3A%22%22%7D; _data_chl=key=baiduaxg&word=6KW/55Oc5pWw5o2u; SaveUserName=18767192025; ASP.NET_SessionId=nnxzn25qwwzpkfq5a4ukynwb; Qs_lvt_194035=1538992384%2C1540174341%2C1540179279%2C1540180868%2C1540181371; Hm_lvt_91a409c98f787c8181d5bb8ee9c535ba=1540174342,1540179279,1540180869,1540181371; XIGUADATA=UserId=af18d6e121c10242&checksum=afe304d4f52f&XIGUADATALIMITID=628383e7ee174f278ae3b58bcf60d766; compareArray=[]; mediav=%7B%22eid%22%3A%22163230%22%2C%22ep%22%3A%22%22%2C%22vid%22%3A%227ZCRIS8CG9%3C--eRV46jr%22%2C%22ctn%22%3A%22%22%7D; setPageSize=50; Qs_pv_194035=2778840396801713700%2C2942644872379842600%2C4102929743540580000%2C3064176469521709000%2C1018963967398630300; Hm_lpvt_91a409c98f787c8181d5bb8ee9c535ba=1540200507',
            'Host': 'data.xiguaji.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
        }

    # Send the search requests, one per result page.
    def parse(self, key=None):
        """Fetch search-result pages 1-6 for *key* and parse each one.

        :param key: search keyword; defaults to 'KOL红人' when None
                    (bug fix: the original hard-coded the keyword and
                    silently ignored this parameter).
        """
        search_key = key if key is not None else 'KOL红人'
        for page in range(1, 7):
            data = {
                'type': '1',
                'key': search_key,
                'pageIndex': str(page),
                'IsAjaxRequest': '1'
            }
            base_url = 'http://data.xiguaji.com/Search/SearchAct/?' + urlencode(data)

            # verify=False matches the original behaviour; urllib3 warnings
            # are silenced at import time above.
            res = requests.get(base_url, headers=self.header, verify=False)
            soup = BeautifulSoup(res.text, 'lxml')

            self.concustom(soup, page)

    # Parse one result page and build the full record for each account.
    def concustom(self, soup, page=None):
        """Extract every account card from *soup* and save it to MySQL.

        :param soup: BeautifulSoup tree of one search-result page
        :param page: page number, used only for progress logging
        """
        print('正在解析************%s************页' % page)
        div_list = soup.find_all('div', class_='list-piece clearfix')
        for div in div_list:
            wx_nickname = div.select('.number-details h3')[0].get_text()
            wx_logurl = div.select('.number-logo img')[0].get('src', '')
            wx_qrcode = div.select('.b-qr-code img')[0].get('src', '')
            # The WeChat id sits as loose text before the first <span>;
            # strip the CRLFs and spaces the markup leaves behind.
            wx_number = re.compile(r'<div class="number-info">(.*?)<span>', re.S).findall(str(div))[0].replace('\r\n', ' ').replace(' ', '')
            wx_num = wx_number if wx_number != '\n' else '--'
            wx_account = re.compile(r'<div class="number-info">.*?<span>(.*?)</span>', re.S).findall(str(div))

            # Account description ("功能介绍") — may be absent.
            pat = re.compile(r'a-items-left">.*?<p>(.*?)</p>', re.S).findall(str(div))
            wx_item = pat[0] if pat else '--'
            # Tags are rendered as <a> links inside the right-hand column.
            res_tag = re.compile(r'a-items-right">(.+?)</div>', re.S).findall(str(div))[0]
            response_tags = re.compile(r'<a href=.+?>(.+?)</a></span>').findall(res_tag)
            tag = ' '.join(response_tags) if response_tags else '暂无标签'

            li_tag = re.compile(r'ul class="number-describe-index clearfix">(.+?)<ul>', re.S).findall(str(div))[0]
            # Example capture:
            # [('预估活跃粉丝', '84'), ('头条平均阅读', '5'), ('头条平均点赞数', '0'),
            #  ('头条平均留言数', '0'), ('周发文篇数', '4')]
            li = re.compile(r'<li>(.+?)<span>(.+?)</span></li>').findall(li_tag)
            expect_fans = li[0][1]      # estimated active fans
            mean_read = li[1][1]        # average headline reads
            likes = li[2][1]            # average headline likes
            message = li[3][1]          # average headline comments
            documents_nums = li[4][1]   # articles published per week

            field = {
                'wx_nickname': wx_nickname,
                'wx_logurl': wx_logurl,
                'wx_qrcode': wx_qrcode,
                'wx_num': wx_num,
                'wx_item': wx_item,  # account description
                'tag': tag,
                "expect_fans": expect_fans,
                "mean_read": mean_read,
                "likes": likes,
                "message": message,
                "documents_nums": documents_nums,
            }
            # Personal accounts show the plain text '个人'; company accounts
            # embed the owner name before an <a> link.
            if '个人' in wx_account[0]:
                field['wx_res'] = wx_account[0]
            else:
                res = re.compile(r'(.*?)<a').findall(wx_account[0])
                field['wx_res'] = res[0] if res else '--'

            self.save_mysql(field)

    def save_mysql(self, fields):
        """Insert one account record (*fields* dict) into the ``weixin`` database.

        Fixes over the original: keyword arguments to ``connect()``
        (positional form was removed in PyMySQL 1.0), rollback on failure,
        and the connection is always closed (it used to leak on every call).
        """
        coon = pymysql.connect(host='127.0.0.1', user='root', password='123456',
                               database='weixin', charset='utf8')
        try:
            with coon.cursor() as cursor:
                db = Mysql(coon, cursor)
                sql, data = db.insert_to_mysql(fields)
                cursor.execute(sql, data)
                print('正在保存公众号** %s **' % data[0])
                coon.commit()
        except Exception as e:
            coon.rollback()
            print(e)
        finally:
            coon.close()

if __name__ == '__main__':
    # Script entry point: crawl with the default keyword, then report completion.
    Wx_spider().parse()
    print('全部保存完成')

























