'''
Assemble request parameters into the query URL for the search endpoint.
'''
import json
from datetime import datetime

from config.local_config import DOUYIN_URl
from config import local_config as config
from urllib.parse import quote,urlencode,unquote
from public_act.data_act import getCurrentTime
from root_directory import root_directory
from urllib.parse import urlparse, parse_qs, unquote

def queryData(page):
    '''
    Build the query-string portion of the search URL for the given page.

    Returns the URL-encoded query string; non-ASCII values (the Chinese tag)
    are percent-encoded automatically by urlencode.
    '''
    date_from = getCurrentTime('%Y-%m-%d')
    date_to = getCurrentTime('%Y-%m-%d')

    # getCurrentTime() yields "YYYY-MM-DD HH:MM:SS.ffffff"; turn it into a
    # millisecond Unix timestamp for the cache-busting "_" parameter.
    now = datetime.strptime(getCurrentTime(), "%Y-%m-%d %H:%M:%S.%f")
    timestamp_ms = int(now.timestamp() * 1000)

    params = {
        'pageIndex': page,
        'pageSize': 10,
        'q.sort': 1,
        'q.keywordType': 0,
        'q.dyContentTagId': 2024,
        'q.keyword': '',
        'q.tag': '母婴育儿',
        'q.searchType': 1,
        'q.period': 24,
        'q.dateFrom': date_from,
        'q.dateTo': date_to,
        '_': timestamp_ms,
    }

    # urlencode percent-encodes the Chinese text for us.
    return urlencode(params)

def top_20_url(index):
    '''
    Return the full search URL for result page *index* by appending the
    encoded query string to the configured base URL.
    '''
    return config.CHANXIAOHONG_URL + queryData(index)


def detail_url(json_data):
    '''
    Extract video URLs and per-video stats from a search-result JSON payload.

    Parameters
    ----------
    json_data : str
        Raw JSON response text; expected to contain ``Data.AwemeList``
        (a list of video records) — raises KeyError otherwise, same as before.

    Returns
    -------
    tuple[list, list[dict]]
        ``(video_urls, results)`` where each result dict carries the
        engagement fields listed below for one video.
    '''
    payload = json.loads(json_data)

    # Fields copied verbatim from each list entry into the result record.
    fields = (
        'BloggerNickName', 'Desc', 'Score', 'LikeCount', 'CommentCount',
        'ShareCount', 'Fans', 'CollectCount', 'VideoUrl',
    )

    detail_urls = []
    results = []
    for item in payload["Data"]["AwemeList"]:
        # .get keeps parity with the original: a missing VideoUrl yields None
        # in detail_urls (but still raises in the record below, as before).
        detail_urls.append(item.get('VideoUrl'))
        results.append({key: item[key] for key in fields})

    return detail_urls, results


def parse_url(url):
    '''
    Parse a search URL and return its query parameters as a dict.

    Bug fixed: the previous version unquoted the *whole* URL with GBK before
    splitting. The URLs this script handles carry UTF-8 percent-encoding
    (e.g. ``%E6%AF%8D...`` = 母婴育儿), so GBK decoding mangled the Chinese
    values; unquoting before splitting could also corrupt the URL structure
    if a decoded character happened to be '&' or '='. ``parse_qs`` decodes
    each value correctly (UTF-8 by default), so we split first and let it
    do the unquoting.

    Returns
    -------
    dict[str, list[str]]
        Parameter name -> list of decoded values (parse_qs drops
        empty-valued parameters such as ``q.keyword=``, as before).
    '''
    parsed = urlparse(url)
    params = parse_qs(parsed.query)
    # Keep the debugging printout the original produced.
    print(params)
    return params




if __name__ == '__main__':
    # Two captured sample URLs used to sanity-check query parsing.
    sample_urls = (
        r'https://dy.feigua.cn/api/v1/aweme/search/list?pageIndex=1&pageSize=10&q.sort=0&q.keywordType=0&q.dyContentTagId=2024005&q.keyword=&q.tag=%E6%AF%8D%E5%A9%B4%E8%82%B2%E5%84%BF&q.searchType=1&q.period=24&q.dateFrom=2023-07-26&q.dateTo=2023-07-26&_=1690339817133',
        r'https://dy.feigua.cn/api/v1/aweme/search/list?pageIndex=2&pageSize=10&q.sort=0&q.keywordType=0&q.dyContentTagId=2024005&q.keyword=&q.tag=%E6%AF%8D%E5%A9%B4%E8%82%B2%E5%84%BF&q.searchType=1&q.period=24&q.dateFrom=2023-07-26&q.dateTo=2023-07-26&_=1690339829791',
    )
    for sample in sample_urls:
        parse_url(sample)
    print(top_20_url(1))



