import time
import datetime
import random
import json
import requests
import pandas as pd
from bs4 import BeautifulSoup
from faker import Factory


def get_user_agent(num):
    """
    Generate a list of random User-Agent request headers.

    :param num: number of headers to generate
    :return: list of dicts, each of the form {'User-Agent': <ua string>}
    """
    factory = Factory.create()
    # List comprehension instead of a manual append loop.
    return [{'User-Agent': factory.user_agent()} for _ in range(num)]


def get_proxy(pages, ua_num, target_url):
    """
    Crawl free proxies from kuaidaili.com, validate each against the
    target URL, and persist/return the cleaned records.

    :param pages: number of listing pages to crawl
    :param ua_num: number of random User-Agent headers to generate
    :param target_url: URL used to verify that each proxy actually works
    :return: list of [ip, port, degree, type, position, operator, speed, last_time]
    """
    headers = get_user_agent(ua_num)  # pool of request headers
    proxy_list = []  # validated proxy records to be saved
    try:
        for num in range(0, pages):
            print('Start：第 %d 页请求' % (num + 1))
            # Listing page URL (pages are 1-based on the site)
            url = 'https://www.kuaidaili.com/free/inha/' + str(num + 1) + '/'

            # Random delay of 1.0-3.0 s between requests to avoid hammering
            # the site (randint(a, b): a <= n <= b; random(): float in [0, 1))
            time.sleep(random.randint(1, 2) + random.random())
            header = random.choice(headers)  # pick one request header at random

            # Fetch and parse the page with BeautifulSoup
            html = requests.get(url, headers=header)
            soup = BeautifulSoup(html.text, 'lxml')

            # CSS selectors, one per table column
            ip = soup.select("td[data-title='IP']")
            port = soup.select("td[data-title='PORT']")
            degree = soup.select("td[data-title='匿名度']")
            proxy_type = soup.select("td[data-title='类型']")
            position = soup.select("td[data-title='位置']")
            speed = soup.select("td[data-title='响应速度']")
            last_time = soup.select("td[data-title='最后验证时间']")

            # Validate each row; keep only proxies that can reach target_url
            for i, p, dg, pt, ps, sp, lt in zip(ip, port, degree, proxy_type, position, speed, last_time):
                ip_port = str(i.get_text()) + ':' + str(p.get_text())
                if is_useful(ip_port, header, target_url):
                    # Assemble the record fields
                    p_ip = str(i.get_text())
                    p_port = str(p.get_text())
                    p_degree = str(dg.get_text())
                    p_type = str(pt.get_text())
                    p_position = str(ps.get_text()).rsplit(' ', 1)[0]  # location without operator
                    p_operator = str(ps.get_text()).rsplit(' ')[-1]    # operator = last token
                    p_speed = str(sp.get_text())
                    p_last_time = str(lt.get_text())

                    proxy_list.append([p_ip, p_port, p_degree, p_type, p_position, p_operator, p_speed, p_last_time])
            print('End：第 %d 页结束！==========================' % (num + 1))

    except Exception as e:
        print('程序 get_proxy 发生错误，Error：', e)

    finally:
        # Persist whatever was collected so far, even after an error
        write_proxy(proxy_list)

    return proxy_list


def is_useful(ip_port, headers, target_url):
    """
    Check whether a proxy can reach the target URL.

    :param ip_port: proxy address as an 'ip:port' string
    :param headers: request-header dict to send
    :param target_url: URL used to verify the proxy's validity
    :return: True if the request through the proxy succeeds, else False
    """
    proxy_ip = 'http://' + ip_port
    # BUG FIX: map both schemes. The original only set 'http', so an
    # https target (e.g. https://www.baidu.com) was fetched directly,
    # never through the proxy — every proxy looked "valid".
    proxies = {'http': proxy_ip, 'https': proxy_ip}
    flag = True
    try:
        requests.get(url=target_url, headers=headers, proxies=proxies, timeout=2)
        print("【可用】：" + ip_port)
    except Exception as e:
        print('程序 is_useful 发生错误，Error：', e)
        flag = False
    return flag


def write_proxy(proxy_list, filename='my_proxy.xlsx'):
    """
    Save the cleaned proxy records to an xlsx file.

    :param proxy_list: list of proxy records
                       ([ip, port, degree, type, position, operator, speed, last_time])
    :param filename: output path (default 'my_proxy.xlsx', matching read_ip)
    :return: True on success, False otherwise
    """
    # NOTE: the original computed a timestamp here but never used it;
    # the dead local has been removed.
    flag = True  # success flag
    print('--- 开始保存 ---')
    try:
        df = pd.DataFrame(proxy_list,
                          columns=['ip', 'port', 'degree', 'type', 'position', 'operator', 'speed', 'last_time'])
        df.to_excel(filename, index=False)
        print('--- 保存成功！---')
    except Exception as e:
        print('--- 保存失败！---：', e)
        flag = False
    return flag


def read_ip(filename='my_proxy.xlsx'):
    """
    Load the saved proxy pool and build requests-style proxy dicts.

    :param filename: xlsx file produced by write_proxy (default 'my_proxy.xlsx')
    :return: list of dicts like {'http': 'http://ip:port'}
    """
    proxy_list = pd.read_excel(filename)
    proxy_list['port'] = proxy_list['port'].astype('str')  # port int -> str before concatenation
    proxy_list['ip_port'] = proxy_list['ip'].str.cat(proxy_list['port'], sep=':')  # 'ip:port'
    # BUG FIX: requests proxy URLs need an explicit scheme. The raw
    # 'ip:port' form was also inconsistent with is_useful(), which
    # validated proxies as 'http://ip:port'.
    proxy_list['final'] = proxy_list['ip_port'].apply(lambda x: {'http': 'http://' + x})
    return list(proxy_list['final'])


def get_ip():
    """
    Entry point for building the proxy pool: crawl, validate and persist.
    """
    target_url = 'https://www.baidu.com'  # URL each candidate proxy is validated against
    # Crawl 2 listing pages using 3 rotating User-Agent headers.
    get_proxy(2, 3, target_url)


def get_proxies():
    """
    Rebuild the proxy pool on disk, then load it back.

    :return: list of requests-style proxy dicts
    """
    # 1. Crawl, validate and persist a fresh pool.
    get_ip()
    # 2. Read the pool back from the saved file.
    return read_ip()


def get_shop(proxies, uid):
    """
    Crawl the Winona Tmall shop listing pages and collect product ids.

    :param proxies: list of requests-style proxy dicts; one is chosen at random per request
    :param uid: list the collected product ids are appended to (mutated in place)
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
        'referer': 'https://winona.tmall.com',
        'cookie': 'cna=phTeF3tnVB8CAbc++e1bmIoE; pnm_cku822=; hng=CN%7Czh-CN%7CCNY%7C156; lid=tb551427943; enc=BMsRNDV7zdi4rgyFWoo1MjpjlkjbEZtBLv64srr%2FrTlY67pWZKi3LzA6aNhxxW7BlhOaTAg3Df7yL%2BFqlvSyFE3zm0omdbGUCWdF9g%2FkMzU%3D; xlly_s=1; sgcookie=E100QpFgIWdGuh%2BR3U2qw15z0gtj0g1%2Bd3Kmtyv8WJqlPSlPY9F2w2cgINe%2FPoyXBGNO7r8HXIbs0299NdpbukHRig%3D%3D; uc1=cookie14=Uoe0bU9HHTExHw%3D%3D; t=92b9f65ff76a32c87b930808927c1bef; uc3=nk2=F5RAR4CIo2a9LBk%3D&id2=UUphw2QjwsUuB3wing%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&vt3=F8dCufbMgo5Qg3i5DkM%3D; tracknick=tb551427943; uc4=nk4=0%40FY4L6wiPyYFlBDYV0nQuuX15qj%2BaTA%3D%3D&id4=0%40U2grGNttFd3vjCSJ%2Fz6JCJAuMmq7Z6%2BV; lgc=tb551427943; _tb_token_=3638f99a9be7b; cookie2=11642829fab2abd3ef585d33d6750457; cq=ccp%3D1; _m_h5_tk=353e8b1ab8f1aa1bc230c417a607ee4b_1600761621840; _m_h5_tk_enc=ce3ba69c6d5337e2aa044d174fc208e4; tfstk=cSydBO9MBNbnNFiT0WCiVmapz1jGZT7-5HgkeiKLaHAdPV9Ri8w0HSGcO0TKBGC..; l=eBOWDlUmOh8O0w_jBOfwourza77OSIRAguPzaNbMiOCP_k5p5KCFWZr6RwY9C3GVh67XR3uV-FgLBeYBqIcxTXwpIavA_1kmn; isg=BNLSiBOb2QSsZSXG6kjlMhRPI5i049Z9R2ocupwr_gVwr3KphHMmjdjJHwuT304V',
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9'
    }

    for i in range(2):
        # Async-search endpoint of the Winona shop, page i+1
        url = 'https://winona.tmall.com/i/asynSearch.htm?mid=w-14573268338-0&pageNo={}'.format(i + 1)
        response = requests.get(url, headers=headers, proxies=random.choice(proxies)).text
        soup = BeautifulSoup(response, 'lxml')
        for con in soup.find_all('dl'):
            data_id = con.get('data-id')
            # BUG FIX: <dl> tags without a data-id attribute made
            # con.get() return None, crashing on .replace().
            if data_id is not None:
                # Strip escaped backslashes/quotes left by the async response
                uid.append(data_id.replace('\\', '').replace('"', ''))
        time.sleep(2 + random.random())
        print('正在获取第{}页商品信息'.format(i + 1))


def get_goods_comment(proxies, uid):
    """
    Scrape every comment page for each product id and write the rows to
    a CSV-style text file (uid,nickname,date,content per line).

    :param proxies: list of requests-style proxy dicts; one is chosen at random per request
    :param uid: list of product ids collected by get_shop
    """
    print('start')
    session1 = requests.session()
    headers = {
        'cookie': 'cna=qMU/EQh0JGoCAW5QEUJ1/zZm; enc=DUb9Egln3%2Fi4NrDfzfMsGHcMim6HWdN%2Bb4ljtnJs6MOO3H3xZsVcAs0nFao0I2uau%2FbmB031ZJRvrul7DmICSw%3D%3D; lid=%E5%90%91%E6%97%A5%E8%91%B5%E7%9B%9B%E5%BC%80%E7%9A%84%E5%A4%8F%E5%A4%A9941020; otherx=e%3D1%26p%3D*%26s%3D0%26c%3D0%26f%3D0%26g%3D0%26t%3D0; hng=CN%7Czh-CN%7CCNY%7C156; x=__ll%3D-1%26_ato%3D0; t=2c579f9538646ca269e2128bced5672a; _m_h5_tk=86d64a702eea3035e5d5a6024012bd40_1551170172203; _m_h5_tk_enc=c10fd504aded0dc94f111b0e77781314; uc1=cookie16=V32FPkk%2FxXMk5UvIbNtImtMfJQ%3D%3D&cookie21=U%2BGCWk%2F7p4mBoUyS4E9C&cookie15=UtASsssmOIJ0bQ%3D%3D&existShop=false&pas=0&cookie14=UoTZ5bI3949Xhg%3D%3D&tag=8&lng=zh_CN; uc3=vt3=F8dByEzZ1MVSremcx%2BQ%3D&id2=UNcPuUTqrGd03w%3D%3D&nk2=F5RAQ19thpZO8A%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D; tracknick=tb51552614; _l_g_=Ug%3D%3D; ck1=""; unb=3778730506; lgc=tb51552614; cookie1=UUBZRT7oNe6%2BVDtyYKPVM4xfPcfYgF87KLfWMNP70Sc%3D; login=true; cookie17=UNcPuUTqrGd03w%3D%3D; cookie2=1843a4afaaa91d93ab0ab37c3b769be9; _nk_=tb51552614; uss=""; csg=b1ecc171; skt=503cb41f4134d19c; _tb_token_=e13935353f76e; x5sec=7b22726174656d616e616765723b32223a22393031623565643538663331616465613937336130636238633935313935363043493362302b4d46454e76646c7243692b34364c54426f4d4d7a63334f44637a4d4455774e6a7378227d; l=bBIHrB-nvFBuM0pFBOCNVQhjb_QOSIRYjuSJco3Wi_5Bp1T1Zv7OlzBs4e96Vj5R_xYB4KzBhYe9-etui; isg=BDY2WCV-dvURoAZdBw3uwj0Oh2yUQwE5YzQQ9qAfIpm149Z9COfKoZwV-_8q0HKp',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'referer': 'https://detail.tmall.com/item.htm?spm=a1z10.5-b-s.w4011-17205939323.51.30156440Aer569&id=41212119204&rn=06f66c024f3726f8520bb678398053d8&abbucket=19&on_comment=1&sku_properties=134942334:3226348',
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9'
    }

    # BUG FIX: open the output file with a context manager so the handle
    # is closed even when a request or JSON parse raises mid-loop
    # (the original only closed it on the success path).
    with open('G:/毕业设计/myspider/comment/winona.txt', 'w+', encoding='UTF-8') as file:
        for i in range(len(uid)):
            # First request only determines the number of comment pages
            url = 'https://rate.tmall.com/list_detail_rate.htm?itemId={}&spuId=273210686&sellerId=525910381&order=3&currentPage=1'.format(
                uid[i])
            response = session1.get(url, headers=headers, proxies=random.choice(proxies)).text
            # Response is JSONP-like: strip the 11-char prefix and trailing ')'
            js = json.loads(response[11:-1])
            page = js['rateDetail']['paginator']['lastPage']
            print('----------------{}------------------'.format(i + 1))
            for j in range(1, int(page) + 1):
                comment_url = 'https://rate.tmall.com/list_detail_rate.htm?itemId={' \
                              '}&spuId=273210686&sellerId=525910381&order=3&currentPage={}'.format(
                    uid[i], j)
                comment_res = session1.get(comment_url, headers=headers,
                                           proxies=random.choice(proxies)).text
                comment = json.loads(comment_res[11:-1])
                for k in comment['rateDetail']['rateList']:
                    content = k['rateContent'].replace(',', '，')  # comment text; commas replaced to keep CSV intact
                    date = k['rateDate']  # comment date
                    nickname = k['displayUserNick']  # commenter nickname
                    text = ','.join((uid[i], nickname, date, content)) + '\n'
                    file.write(text)
                time.sleep(5 + random.random())
                # BUG FIX: the total was printed as page + 1 although the
                # loop only covers `page` pages
                print('正在获取第{}页评论，共{}页评论'.format(j, int(page)))

            time.sleep(6 + random.random())


def run_spider():
    """
    Full pipeline: build the proxy pool, collect product ids from the
    shop pages, then scrape every product's comments.
    """
    proxies = get_proxies()  # validated proxy dicts

    uid = []
    get_shop(proxies, uid)   # fills uid in place with product ids
    uid = list(set(uid))     # de-duplicate ids across pages
    print('--------------共获取{}件商品----------------'.format(len(uid)))
    get_goods_comment(proxies, uid)  # scrape comments per product


# Script entry point: run the whole scraping pipeline.
if __name__ == "__main__":
    run_spider()
