import requests
import time
from bs4 import BeautifulSoup
import re
import pandas as pd
from selenium import webdriver
import csv
import urllib
import random
from datetime import datetime

# Free proxy-list sites (display name -> URL); candidates to scrape for proxy IPs.
daili_webset_dict = {'快代理':'https://www.kuaidaili.com/free/',
                '66免费代理':'http://www.66ip.cn/',
                '89免费代理':'https://www.89ip.cn/',
                'mimvp':'https://proxy.mimvp.com/freesecret.php',
                '云代理':'http://www.ip3366.net/',
                'seofangfa':'https://seofangfa.com/proxy/',
                '鲲鹏':'http://www.site-digger.com/html/articles/20110516/proxieslist.html',
                '尼玛代理':'http://www.nimadaili.com/',
                '芝麻代理':'https://zhimahttp.com/?utm-source=bdtg&utm-keyword=?DL201198',
                }

# Desktop (PC) User-Agent strings — pick one at random to vary request headers.
user_agent_list_pc = [
        # Assorted desktop browsers
        # Opera
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
        "Opera/8.0 (Windows NT 5.1; U; en)",
        "Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50",
        # Firefox
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
        "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
        # Safari
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
        # Chrome
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16",
        # 360 Browser
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
        # Taobao Browser
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
        # Liebao (Cheetah) Browser
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
        # QQ Browser
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
        # Sogou Browser
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)",
        # Maxthon Browser
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36",
        # UC Browser
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36",
        # Miscellaneous desktop Chrome variants
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]

# Mobile User-Agent strings — pick one at random to masquerade as a phone client.
user_agent_list_phone = [
        # iPhone
        "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        # iPod
        "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        # iPad
        "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
        "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        # Android
        "Mozilla/5.0 (Linux; U; Android 2.2.1; zh-cn; HTC_Wildfire_A3333 Build/FRG83D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        # QQ Browser for Android
        "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
        # Android Opera Mobile
        "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
        # Android Pad Moto Xoom
        "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
        # BlackBerry
        "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
        # WebOS HP Touchpad
        "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
        # Nokia N97
        "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
        # Windows Phone Mango
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
        # UC Browser
        "UCWEB7.0.2.37/28/999",
        "NOKIA5700/ UCWEB7.0.2.37/28/999",
        # UC Openwave
        "Openwave/ UCWEB7.0.2.37/28/999",
        # UC Opera
        "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    ]

def save_page_txt(file_name, content):
    """Persist *content* to the UTF-8 text file *file_name*, overwriting it."""
    with open(file_name, mode="w", encoding="utf-8") as out:
        out.write(content)
        out.flush()
def save_csv(file_name, write, encoding="utf-8"):
    """Append one row (*write*: list or tuple of cell values) to a CSV file.

    For gbk output, characters that gbk cannot encode (e.g. '\\xa0') are
    replaced with '?' before writing, so the write never raises
    UnicodeEncodeError.  (Fix: the previous eval(repr(...).replace(...))
    hack only mangled visible backslash escapes and, by its own admission,
    could not catch them all.)
    """
    if encoding == 'gbk':
        sanitized = []
        for info in write:
            # Only strings need sanitising; ints etc. pass through unchanged.
            if isinstance(info, str):
                info = info.encode('gbk', errors='replace').decode('gbk')
            sanitized.append(info)
        write = sanitized

    # 'a' = append mode; newline='' stops csv from doubling line endings on Windows.
    with open(file_name, "a", encoding=encoding, newline='') as file:
        csv_writer = csv.writer(file)
        csv_writer.writerow(write)

# Using selenium: load *url* and search its page_source for each key.
def find_in_source_selenium(url, key_list):
    """Open *url* in Chrome via selenium and report, for each string in
    *key_list*, whether it appears in the rendered page source (printing
    20 chars of surrounding context when found)."""
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        content = driver.page_source  # fully rendered HTML, after JS execution
        for s in key_list:
            index = content.find(s)
            if index != -1:
                print('找到{}，index：{}，前后内容为:{}'.format(s, index, content[index-20:index+20]))
            else:
                print('未在获取的网页源码中找到:{}'.format(s))
    finally:
        # Fix: the driver was never closed, leaking one Chrome process per call.
        driver.quit()
# Using requests: fetch *url* and search the raw source for each key.
def find_in_source_requests(url, header_txt, find_list, encoding=None):
    """Fetch *url* with headers read from *header_txt* and report whether each
    string in *find_list* occurs in the raw (unrendered) page source."""
    resp = header_requests_get(header_txt, url, encoding=encoding)
    print('返回值：{}'.format(resp))
    page = resp.text

    for needle in find_list:
        if needle in page:
            print('{}在源码中已找到'.format(needle))
        else:
            print('源码未找到：{}'.format(needle))

# Parse request headers that were copy-pasted into a text file into a dict.
def header_trans(header_filename, prin=False):
    """Read a raw header dump (one 'Name: value' pair per line) from
    *header_filename* (e.g. 'headers.txt') and return it as a dict.

    Lines without a colon are reported and skipped.  (Fix: the original fell
    through after the warning and re-inserted the previous line's pair — or
    raised NameError if the first line had no colon — and its split(': ')
    truncated values that themselves contained ': '.)
    """
    with open(header_filename, "r", encoding="utf-8") as f:
        headers = f.read()
    # One header per line.
    trans_list = headers.split('\n')

    trans_dict = {}
    for line in trans_list:
        # Prefer the conventional ': ' separator; fall back to a bare ':'.
        if ': ' in line:
            key, _, value = line.partition(': ')
        elif ':' in line:
            key, _, value = line.partition(':')
        else:
            print("未找到分隔符':'or': '")
            continue
        trans_dict[key] = value
    if prin:
        print(trans_dict)

    return trans_dict

# GET a URL using headers copied into a text file; optionally dump the body.
def header_requests_get(header_filename, url, encoding=None, save=False, filename=None):
    """GET *url* with request headers parsed from *header_filename* (a file the
    caller pasted a browser request-header block into).

    encoding: if given, forced onto the response before .text is decoded.
    save/filename: when save is True the raw body is written to
    '<filename>.txt'; *filename* is then required.
    Returns the requests.Response object.
    """
    headers = header_trans(header_filename)
    r = requests.get(url, headers=headers)

    if encoding is not None:
        r.encoding = encoding

    if save:
        if filename is None:
            # Fix: previously crashed with an opaque TypeError (None + '.txt').
            raise ValueError("filename is required when save=True")
        with open(filename + '.txt', 'wb+') as f:
            f.write(r.content)
    return r

# Convert a requests response into a BeautifulSoup tree.
def BF4_cutcontent(re_get):
    """Parse the text body of requests response *re_get* with the lxml parser
    and return the resulting BeautifulSoup object."""
    return BeautifulSoup(re_get.text, "lxml")

# Extract all regex matches from a string.
def re_compile_findall(rules, content):
    """Return a list of all non-overlapping matches of the pattern *rules*
    (e.g. r"\d{1,2}月\d{1,2}日") in the string *content*.

    Fix: dropped the redundant in-function `import re` (the module is already
    imported at file level) and collapsed compile+findall into re.findall,
    which uses re's internal pattern cache.
    """
    return re.findall(rules, content)

def website_Login(account, password, postUrl, headers):
    """POST *account*/*password* to *postUrl* through a cookie-persisting
    session, save the cookies to 'loginCookies.txt', then re-fetch the page
    and dump its body to 'douban_2.txt'.  Returns None.
    """
    # Python 2/3 compatibility: cookielib was renamed http.cookiejar in Python 3.
    try:
        import cookielib
        print("user cookielib in python2.")
    except ImportError:  # fix: a bare `except:` here used to hide unrelated errors
        import http.cookiejar as cookielib
        print("user cookielib in python3.")

    # A session carries cookies across requests within one login flow.
    websiteSession = requests.session()
    # The default session cookie jar has no save(); LWPCookieJar provides one,
    # so swap it in to be able to persist cookies to disk.
    websiteSession.cookies = cookielib.LWPCookieJar(filename="loginCookies.txt")

    print("开始模拟登录嘻嘻嘻")

    postData = {
        "username": account,
        "password": password,
    }

    # POST the credentials.  Note: a 200 status code alone does not prove the
    # login succeeded — most sites return 200 either way.
    responseRes = websiteSession.post(postUrl, data=postData, headers=headers)
    print(f"statusCode = {responseRes.status_code}")
    websiteSession.cookies.save()

    # NOTE(review): this re-GETs the login URL itself, presumably to check the
    # logged-in state; confirm this is the intended page to fetch.
    responseRes = websiteSession.get(postUrl, headers=headers, allow_redirects=False)
    result = responseRes.text
    with open('douban_2.txt', 'wb+') as f:
        f.write(responseRes.content)
    print(result)



if __name__ == '__main__':

    # ----- Beijing subway ridership: scrape the #客流观察# Weibo topic -----
    def bjsubway_pa(save=False):
        """Scrape Weibo search result pages 1-17 for the ridership topic.

        Request headers are read from 'headers.txt'.  Every <p class="txt">
        element on each page is collected; when *save* is True, the raw pages
        and the collected elements are also written to disk.
        Returns a list with one find_all result set per page.
        """
        headers = header_trans('headers.txt')
        content = []
        for i in range(1, 18):
            url = 'https://s.weibo.com/weibo?q=%23%E5%AE%A2%E6%B5%81%E8%A7%82%E5%AF%9F%23&from=default&page=' + str(i)

            # Fetch the page; optionally keep the raw bytes for offline parsing.
            r = requests.get(url, headers=headers)
            if save:
                with open('地铁流量page' + str(i) + '.txt', 'wb+') as f:
                    f.write(r.content)
            # Pull the post bodies out of the result page.
            html = r.text
            bf = BeautifulSoup(html, "lxml")
            texts = bf.find_all('p', class_='txt')
            content.append(texts)
            print('已爬取第{}页'.format(i))
            time.sleep(10)  # throttle so Weibo doesn't rate-limit/block us

        if save:
            with open('地铁流量.txt', 'w', encoding='UTF-8') as f:
                f.write(str(content))
        return content  # list of bs4 result sets, one per page

    def bjsubway_save():
        """Parse the scraped Weibo dump '地铁流量.txt' into a daily ridership
        table and export it to '北京地铁客流数据.xlsx'.

        Returns a DataFrame with columns date/people/year/month/day/week/weekend,
        where 'people' is in units of 10,000 passengers.
        """
        with open('地铁流量.txt', 'r', encoding='UTF-8') as f:
            content = f.read()
        content_list = content.split('\n')

        # Keep only entries containing the daily-ridership marker, deduplicated.
        # (Fix: the original popped from content_list while iterating it, which
        # skips elements, and appended every value to new_list regardless of
        # whether the marker test passed.)
        marker = '昨日<em class="s-color-red">客流</em>'
        new_list = []
        for value in content_list:
            if marker in value and value not in new_list:
                new_list.append(value)

        # Extract (date, ridership) pairs into a DataFrame.
        pattern_date = re.compile(r"\d{1,2}月\d{1,2}日")  # e.g. '1月23日'
        pattern_num = re.compile(r"\d{2,4}\.\d{0,3}万|\d{2,4}万")  # e.g. '123.4万' / '123万'
        df = pd.DataFrame(columns=['date', 'people'])
        for value in new_list:
            date = pattern_date.findall(value)
            num_str = pattern_num.findall(value)

            if len(date) == 1 and len(num_str) == 1:
                # The usual case: one post reports one day's figure.
                df.loc[df.index.size] = [date[0], num_str[0]]
            elif (len(date) != 1) and (len(num_str) == len(date)):
                # Some posts report several days at once, dates and numbers paired.
                for n, date_str in enumerate(date):
                    df.loc[df.index.size] = [date_str, num_str[n]]
            # Anything else is ambiguous and dropped.

        # '123.4万' -> 123.4 (unit: 10,000 passengers).
        df['people'] = df['people'].map(lambda x: float(x.replace('万', '')))

        # Drop noise: duplicate rows and implausibly large values.
        df.drop_duplicates(inplace=True)  # fix: the original call discarded its result
        df.drop(index=df[df['people'] > 1200].index, inplace=True)

        # '1月2日' -> '01/02' (zero-padded month/day).
        pattern_md = re.compile(r"\d{1,2}")
        def month_day_trans(cn_date):  # renamed: the original parameter shadowed builtin `str`
            month_day = pattern_md.findall(cn_date)
            return month_day[0].zfill(2) + '/' + month_day[1].zfill(2)
        df['date'] = df['date'].map(month_day_trans)

        # Drop isolated rows whose first date digit disagrees with both
        # neighbours (out-of-sequence scrapes).
        ruler_list = []
        for i in df.index:
            try:
                ruler_list.append(df['date'][i - 1][0] + df['date'][i][0] + df['date'][i + 1][0])
            except KeyError:  # missing neighbour label (first/last row or a gap)
                ruler_list.append('0' * 3)
        # Fix: the original collected enumerate positions but df.drop works on
        # labels; pair each ruler with its actual index label instead.
        del_labels = [label for label, ruler in zip(df.index, ruler_list)
                      if ruler in ('010', '101')]
        df.drop(index=del_labels, inplace=True)

        # Prepend the year.  Rows run newest-first, so if the month jumps UP
        # between consecutive rows we crossed back into the previous year.
        df.reset_index(drop=True, inplace=True)  # make the index contiguous again
        start = 2021
        for i in df.index:
            full_date = str(start) + '/' + df['date'][i]  # renamed: original shadowed the `time` module
            try:
                now_month = int(df['date'][i][:2])
                last_month = int(df['date'][i + 1][:2])
                if (now_month + 1) < last_month:
                    start = start - 1
            except KeyError:  # the last row has no successor
                pass
            df.loc[i, 'date'] = full_date  # fix: chained assignment df['date'][i] = ...

        # Expand the date into year/month/day/weekday/weekend columns.
        def time_split(frame, date_column):
            """Add year/month/day/week (1=Mon..7=Sun) and weekend-flag columns."""
            frame[date_column] = pd.to_datetime(frame[date_column])
            frame['year'] = frame[date_column].dt.year
            frame['month'] = frame[date_column].dt.month
            frame['day'] = frame[date_column].dt.day
            frame['week'] = frame[date_column].dt.weekday + 1
            frame['date'] = frame[date_column].dt.date  # plain date keeps Excel readable
            frame['weekend'] = frame['week'].map(lambda w: 1 if w in (6, 7) else 0)
        time_split(df, 'date')

        df.to_excel('北京地铁客流数据.xlsx', index=False)
        return df

    df = bjsubway_save()
