# -*- coding: utf-8 -*-
# 快恋网搜索结果爬虫
import functools
import json
import re
import urllib.error
import urllib.request
from datetime import datetime
from threading import Thread

import xlwt
from bs4 import BeautifulSoup

from util import picDown


def mainDetail(dataListOld, savePath):
    """Fetch each member's detail page, then persist pictures and data.

    dataListOld: rows whose first element is a member VIP id.
    savePath: Excel file path handed to saveKlDetailData().
    """
    rows = []
    for row in dataListOld:
        profileUrl = "http://www.kl.cc/" + str(row[0])
        # Download the profile page and extract all detail fields.
        parsed = analyseKLDetailData(askURl(profileUrl))
        # Keep the page URL as the trailing "VipId" column.
        parsed.append(profileUrl)
        rows.append(parsed)
    # Picture downloads run on a background thread (see @Async).
    savePicture(rows)
    # Persist parsed rows to the Excel workbook.
    saveKlDetailData(rows, savePath)


# Fire-and-forget decorator: run the wrapped function on a background thread.
def Async(f):
    """Decorator that runs *f* asynchronously on a new Thread.

    Improvements over the original: the wrapper is wrapped with
    functools.wraps (preserving name/docstring), and the started Thread is
    returned so callers may join() it. Existing fire-and-forget call sites
    are unaffected (they simply ignore the return value).
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr

    return wrapper


@Async
def savePicture(dataList):
    """Download every member's album picture and snapshot (runs on a thread).

    Each row stores the album URL at index 19 and the snapshot URL at
    index 20; blank entries (the " " placeholders) are skipped.
    """
    started = datetime.now()
    for row in dataList:
        album = row[19]
        snapshot = row[20]
        if album.strip():
            picDown.pictureDown(album)
        if snapshot.strip():
            picDown.pictureDown(snapshot)
    print("图片保存完毕")
    elapsed = (datetime.now() - started).total_seconds()
    print("图片保存耗时：" + str(elapsed) + "秒")


# Persist the parsed member-detail rows to an .xls workbook.
def saveKlDetailData(dataList, savePath):
    """Write the 31-column member-detail rows to *savePath*.

    Column order matches the list built by analyseKLDetailData() plus the
    trailing page URL appended by mainDetail().
    """
    print("saveing……saveKlDetailData……")
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet("快恋搜索详细结果", cell_overwrite_ok=True)
    headers = ("头像地址", "昵称", "用户ID", "卡别", "最近登录日期",
               "最近到店日期", "关注人数", "性别", "年龄", "身高",
               "学历", "月薪", "购房情况", "购车情况", "户籍",
               "婚姻情况", "职业", "属相", "星座", "我的相册",
               "我的留影", "内心独白", "择友年龄开始", "择友年龄结束", "择友身高开始",
               "择友身高结束", "择友学历", "择友婚姻状况", "找啥样的人", "不找啥样的人",
               "VipId")
    # Header row.
    for colNo, title in enumerate(headers):
        sheet.write(0, colNo, title)
    # One data row per member, offset by the header row.
    for rowNo, row in enumerate(dataList):
        print("第%d条" % rowNo)
        for colNo in range(31):
            sheet.write(rowNo + 1, colNo, row[colNo])
    book.save(savePath)
    print("搜索个人主页爬取完毕")


# Persist the search-result summary rows to an .xls workbook.
def saveKlData(dataList, savePath):
    """Write [VipMID, MLevel, UserName, Age, Height] rows to *savePath*."""
    print("saveing……saveKlData……")
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet("快恋搜索结果", cell_overwrite_ok=True)
    headers = ("VipMID", "MLevel", "UserName", "Age", "Height")
    # Header row.
    for colNo, title in enumerate(headers):
        sheet.write(0, colNo, title)
    # One data row per search result, offset by the header row.
    for rowNo, row in enumerate(dataList):
        print("第%d条" % rowNo)
        for colNo in range(5):
            sheet.write(rowNo + 1, colNo, row[colNo])
    book.save(savePath)


# Persist Douban movie rows to an .xls workbook.
def saveData(dataList, savePath):
    """Write the Douban top-250 rows (8 columns each) to *savePath*."""
    print("saveing……")
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet("豆瓣电影250", cell_overwrite_ok=True)
    headers = ("电影链接", "图片链接", "中文名", "外文名", "评分", "评价数", "概况", "相关信息")
    # Header row.
    for colNo, title in enumerate(headers):
        sheet.write(0, colNo, title)
    # One data row per movie, offset by the header row.
    for rowNo, row in enumerate(dataList):
        print("第%d条" % rowNo)
        for colNo in range(8):
            sheet.write(rowNo + 1, colNo, row[colNo])
    book.save(savePath)


# Regexes for the member-homepage header block ("huiyuan" div): photo,
# nickname, member id, card level, last-login / last-visit dates, followers.
findMyImg = re.compile(r'<img alt="我的照片" src="(.*?)"/>')
findMyNickName = re.compile(r'<span id="lbUName">(.*?)</span>')
findMyId = re.compile(r'ID : <span>(\d*)</span>')
findMyCard = re.compile(r'卡别：*\r\n\s*<span>(.*?)</span>', re.S)
findMyEnter = re.compile(r'登录：*\r\n\s*<span>(.*?)</span>', re.S)
findMyArrive = re.compile(r'到店：*\r\n\s*<span>(.*?)</span>', re.S)
findMyAttention = re.compile(r'关注我的人：<span>(\d*)</span>', re.S)
# Personal-profile fields: sex, age, height, education, income, housing,
# car, household registry, marital status, job, zodiac animal, star sign.
findMySex = re.compile(r'<li>性别：<span>(.*?)</span></li>')
findMyAge = re.compile(r'<li>年龄：<span>(.*?)</span></li>')
findMyHeight = re.compile(r'<li>身高：<span>(.*?)</span></li>')
findMyEdu = re.compile(r'<li>学历：<span>(.*?)</span></li>')
findMyInCome = re.compile(r'<li>月薪：<span>(.*?)</span></li>')
findMyHouse = re.compile(r'<li>京房：<span>(.*?)</span></li>')
findMyCar = re.compile(r'<li>购车：<span>(.*?)</span></li>')
findMyHousehold = re.compile(r'<li>户籍：<span>(.*?)</span></li>')
findMyMarry = re.compile(r'<li>婚状：<span>(.*?)</span><span id="txtMarryChild"></span></li>')
findMyJob = re.compile(r'<li>职业：<span>(.*?)</span></li>')
findMySX = re.compile(r'<li>属相：<span>(.*?)</span></li>')
findMyXZ = re.compile(r'<li>星座：<span>(.*?)</span></li>')

# "My album" image. NOTE(review): findMyPicture and findMyPhotos compile the
# same pattern; they are kept separate because getPhoto() applies a different
# URL rewrite to each.
findMyPicture = re.compile(r'<img.*src="(.*?)".*/>', re.S)
# Enlarged CDN URL:  http://cdnimg.kl.cc/memberpic/214543/214543_20190601195929587613999.jpg
# Thumbnail URL:     http://img.kl.cc/memberpic/214543/s_214543_20190601195929587613999.jpg

# "My snapshot" image.
findMyPhotos = re.compile(r'<img.*src="(.*?)".*/>', re.S)
# Enlarged CDN URL:  http://cdnimg.kl.cc/StaticF/SpacePhoto/sp_20181122200831305756959_207603_b.jpg
# Thumbnail URL:     http://img.kl.cc/StaticF/SpacePhoto/sp_20181122200831305756959_207603_s.jpg

# Inner monologue ("内心独白") hidden div.
findFriendHeart = re.compile(r'<div id="secondlayer" style="display: none;">\r\n\s(.*?)</div>', re.S)

# Partner-requirement fields: age range, height range, education, marital status.
findFriendAgeBegin = re.compile(r'<li>年龄：<span>(\d*)</span>～<span>')
findFriendAgeEnd = re.compile(r'</span>～<span>(\d*)</span>岁</li>')
findFriendHeightBegin = re.compile(r'<li>身高：<span>(\d*)</span>～<span id="lbAskHeightE">')
findFriendHeightEnd = re.compile(r'</span>～<span id="lbAskHeightE">(\d*)</span>CM</li>')
findFriendEdu = re.compile(r'<li>学历：<span>(.*?)</span></li>')
findFriendMarry = re.compile(r'<li>婚状：<span>(.*?)</span><span id="lbAskMarryChild"></span></li>')

# "What kind of person I'm looking for" (refined by a second pass in analyseKLDetailData).
findFriendYes = re.compile(r'找啥样的人\r\n\s*(.*?)</span>', re.DOTALL)
# "What kind of person I'm NOT looking for".
findFriendNo = re.compile(r'不找啥样的人.*<span id="lbAskToOther">(.*?)</span>', re.DOTALL)

# Douban movie-page rules.
# e.g. <a href="https://movie.douban.com/subject/1292052/"> yields the subject URL.
findLink = re.compile(r'<a href="(.*?)">')  # movie detail link
# Poster image link.
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets . match newlines
# Movie title(s) — may match both the Chinese and the foreign title.
findTitle = re.compile(r'<span class="title">(.*)</span>')
# Average rating.
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')
# Number of ratings.
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line synopsis (may be absent).
findInq = re.compile(r'<span class="inq">(.*)</span>')
# Miscellaneous credits/details paragraph.
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)


# Crawl the paginated search results.
def getDataList(baseUrl):
    """Fetch search-result pages and return the flattened row list.

    NOTE(review): the full dataset reportedly spans 2288 pages; the range is
    capped at 50 here, presumably for testing — confirm before a full crawl.

    Fixes vs. original: the inner loop reused the outer loop variable `i`
    (confusing shadowing) and appended item-by-item; both replaced with a
    single list.extend().
    """
    dataList = []
    for page in range(1, 50):  # full range would be range(1, 2288)
        # 1. Fetch one page of search-result JSON.
        html = postTest(baseUrl, page)
        # 2. Parse and accumulate its rows.
        dataList.extend(analyseKLData(html))
    return dataList


# Return the first regex match in *text*, or a single-space placeholder so
# every Excel column stays occupied (original padded with " " explicitly).
def _firstMatchOrBlank(pattern, text):
    found = re.findall(pattern, text)
    return found[0] if found else " "


# Parse a kl.cc member detail page.
def analyseKLDetailData(html):
    """Parse a member detail page into a flat field list.

    Returns [] when the page has no "huiyuan" block (missing/blocked
    profile); otherwise the fields in the fixed column order expected by
    saveKlDetailData(). Fields the original code indexed with [0] directly
    are still indexed directly (an IndexError there means the page layout
    changed); all optional fields are blank-padded via _firstMatchOrBlank.
    """
    print("开始解析数据……")
    data = []
    soup = BeautifulSoup(html, "html.parser")
    # Top-left card: photo, nickname, id, card level, login/visit dates, followers.
    itemAll = soup.find_all('div', class_="huiyuan")
    if len(itemAll) == 0:
        return data
    item = str(itemAll[0])
    # The photo path is site-relative; prefix the host.
    data.append("http://www.kl.cc" + re.findall(findMyImg, item)[0])
    data.append(_firstMatchOrBlank(findMyNickName, item))
    data.append(re.findall(findMyId, item)[0])
    data.append(_firstMatchOrBlank(findMyCard, item))
    data.append(re.findall(findMyEnter, item)[0])
    data.append(re.findall(findMyArrive, item)[0])
    data.append(re.findall(findMyAttention, item)[0])

    # Personal-profile block: 12 optional fields, blank-padded when absent.
    itemTwo = str(itemAll[1])
    for pattern in (findMySex, findMyAge, findMyHeight, findMyEdu,
                    findMyInCome, findMyHouse, findMyCar, findMyHousehold,
                    findMyMarry, findMyJob, findMySX, findMyXZ):
        data.append(_firstMatchOrBlank(pattern, itemTwo))

    # TODO(fwh): "my tags" (div.DVUInfob) needs a separate POST request.

    # Album / snapshot URLs (two entries appended inside getPhoto).
    data = getPhoto(data, soup)

    # Requirement panel(s).
    for friend in soup.find_all('div', class_="huadong"):
        friend = str(friend)
        # Inner monologue. TODO(fwh): strip excess whitespace.
        data.append(_firstMatchOrBlank(findFriendHeart, friend))
        # Partner requirements: age range, height range, education, marital status.
        for pattern in (findFriendAgeBegin, findFriendAgeEnd,
                        findFriendHeightBegin, findFriendHeightEnd,
                        findFriendEdu, findFriendMarry):
            data.append(_firstMatchOrBlank(pattern, friend))
        # "What kind of person": two-stage match. TODO(fwh): strip leftover markup.
        friendYes = re.findall(findFriendYes, friend)
        if friendYes:
            inner = re.findall(
                re.compile(r'<span id="lbAskToOther">(.*?)\s', re.S),
                friendYes[0])
            data.append(inner[0] if inner else " ")
        else:
            data.append(" ")
        # "What kind of person NOT".
        data.append(_firstMatchOrBlank(findFriendNo, friend))
    # print(data)
    return data


# Picture extraction isolated so a missing photo block cannot abort parsing.
def getPhoto(data, soup):
    """Append the album URL and snapshot URL to *data* (blank when absent).

    Bug fix: the original `except:` clause's body was the bare expression
    `IndexError` (a no-op), followed by hasattr() checks on the IndexError
    CLASS itself — meaningless. We now catch IndexError (raised when fewer
    than two DVMyPhoto divs exist) explicitly and report it; *data* keeps
    whatever entries were appended before the failure, as before.
    """
    try:
        photoItem = soup.find_all('div', class_="DVMyPhoto")
        # My album. TODO(fwh): only the first of several pictures is captured.
        photoOne = str(photoItem[0])
        myPictures = re.findall(findMyPicture, photoOne)
        if len(myPictures) != 0:
            # Rewrite to the enlarged CDN URL: img -> cdnimg, drop the "s_" prefix.
            m = re.sub("img", "cdnimg", myPictures[0])
            data.append(re.sub("s_", "", m))
        else:
            data.append(" ")
        # My snapshot.
        photoTwo = str(photoItem[1])
        myPhotos = re.findall(findMyPhotos, photoTwo)
        if len(myPhotos) != 0:
            # Rewrite to the enlarged CDN URL: img -> cdnimg, "_s" -> "_b".
            n = re.sub("img", "cdnimg", myPhotos[0])
            data.append(re.sub("_s", "_b", n))
        else:
            data.append(" ")
    except IndexError as exc:
        print(exc)
    return data


# TODO: "my WeChat" field — not implemented


# Parse one page of kl.cc search-result JSON.
def analyseKLData(html):
    """Parse a search-result JSON page into summary rows.

    Returns a list of [VipMID, MLevel, UserName, Age, Height] rows, one per
    entry in the payload's "Entity" array. The pagination metadata
    (PageSize/PageCount/PageNo/Amount) the original bound to unused locals
    is ignored; the original also shadowed the builtin `object`.
    """
    payload = json.loads(html)
    return [[entry['VipMID'], entry['MLevel'], entry['UserName'],
             entry['Age'], entry['Height']]
            for entry in payload['Entity']]


# Parse a Douban top-250 listing page.
def analyseData(html):
    """Parse a Douban page into a list of 8-field movie rows.

    Bug fixes vs. the original:
    - `return data` sat INSIDE the for loop, so only the first movie on the
      page was ever returned; all items are now accumulated and the full
      row list is returned — the shape saveData() iterates over.
    - `inq[0].replace("。", " ")` discarded its result (str is immutable)
      and then the whole match LIST was appended instead of the string.
    """
    dataList = []
    soup = BeautifulSoup(html, "html.parser")
    for item in soup.find_all('div', class_="item"):
        data = []  # all fields for one movie
        item = str(item)
        data.append(re.findall(findLink, item)[0])    # movie link
        data.append(re.findall(findImgSrc, item)[0])  # poster link
        title = re.findall(findTitle, item)
        if len(title) == 2:
            data.append(title[0])                   # Chinese title
            data.append(title[1].replace("/", ""))  # foreign title
        else:
            data.append(title[0])
            data.append(' ')  # placeholder keeps the Excel column occupied
        data.append(re.findall(findRating, item)[0])  # rating
        data.append(re.findall(findJudge, item)[0])   # vote count
        inq = re.findall(findInq, item)               # synopsis may be absent
        data.append(inq[0].replace("。", " ") if inq else " ")
        bd = re.findall(findBd, item)[0]
        bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)    # strip <br> tags
        bd = re.sub('/', " ", bd)
        data.append(bd.strip())                       # trim surrounding space
        dataList.append(data)
    return dataList


# Fetch the content at a URL.
def askURl(url):
    """GET *url* with a browser User-Agent; return the HTTP response object.

    Bug fix: when urlopen() raised, the original fell through to
    `return response` with `response` never bound, raising
    UnboundLocalError instead of the intended graceful handling. We now
    print the error details and return None on failure.
    """
    print("请求地址：" + url)
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=headers)
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
        return None
    return response


# POST the search form and return the page body.
def postTest(url, current):
    """POST the kl.cc search form for result page *current*; return body text.

    Robustness fix: prefer a real `requests` installation; `pip._vendor` is
    a private namespace that can disappear or change between pip versions,
    so it is kept only as a fallback.
    """
    try:
        import requests
    except ImportError:
        from pip._vendor import requests
    # TODO(fwh): additional search conditions could be added here.
    d = {'PageNo': current,
         # all-conditions variant kept for reference:
         # 'SCondition': '{\'Sex\':\'-1\',\'AgeB\':\'0\',\'AgeE\':\'0\',\'Edu\':\'0\',\'HeightB\':\'0\',\'HeightE\':\'0\',\'VipLevel\':\'-1\',\'Marry\':\'0\',\'House\':\'0\',\'Vocation\':\'不限\',\'XZ\':\'0\',\'SX\':\'0\',\'InCome\':\'0\',\'Car\':\'0\',\'HouseHoldP\':\'0\',\'HouseHoldC\':\'0\',\'Race\':\'0\',\'HeadPic\':\'1\',\'DSort\':\'1\'}'}
         # men only:
         'SCondition': '{\'Sex\':\'0\',\'AgeB\':\'0\',\'AgeE\':\'0\',\'Edu\':\'0\',\'HeightB\':\'0\',\'HeightE\':\'0\',\'VipLevel\':\'-1\',\'Marry\':\'0\',\'House\':\'0\',\'Vocation\':\'不限\',\'XZ\':\'0\',\'SX\':\'0\',\'InCome\':\'0\',\'Car\':\'0\',\'HouseHoldP\':\'0\',\'HouseHoldC\':\'0\',\'Race\':\'0\',\'HeadPic\':\'1\',\'DSort\':\'1\'}'}
    r = requests.post(url, data=d)
    return r.text


# Ad-hoc test driver (run this module directly).
if __name__ == "__main__":
    # Alternative entry URLs kept for manual testing:
    # baseUrl = "https://movie.douban.com/top250?start=25&filter="
    # baseUrl = "https://movie.douban.com/top250?start="
    # baseUrl = "http://www.kl.cc/SearchLuck/GetSearchJson?t=Sat%20Jun%2006%202020%2017:46:36%20GMT+0800%20(%E4%B8%AD%E5%9B%BD%E6%A0%87%E5%87%86%E6%97%B6%E9%97%B4)"
    # baseUrl = "http://www.kl.cc/207603"
    # baseUrl = "http://www.kl.cc/214543"
    # baseUrl = "http://www.kl.cc/212254"
    # baseUrl = "http://www.kl.cc/219257"
    # baseUrl = "http://www.kl.cc/217435"
    # baseUrl = "http://www.kl.cc/172589"  # profile with empty fields (special handling)
    baseUrl = "http://www.kl.cc/137349" # profile with empty fields — exercises blank-padding
    html = askURl(baseUrl)
    analyseKLDetailData(html)
    # getDataList(baseUrl)
    # main()
    # postTest(baseUrl)
    # getDataList(baseUrl)
