from bs4 import BeautifulSoup
import queue
import random
import time
import requests, os, re
from collections import namedtuple, defaultdict
from urllib.parse import urlencode
from urllib import parse
import pandas as pd
import csv
import json

# Pool of browser User-Agent strings; one is picked at random per run to
# make the crawler look less like a bot.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 "
    "(KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 "
    "(KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 "
    "(KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 "
    "(KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 "
    "(KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 "
    "(KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'

]
# Gitee requires a logged-in session to crawl, so a real user cookie must be
# filled in here.
# NOTE(fix): this literal was originally split across two physical lines,
# which is a SyntaxError for a single-quoted string; the halves are now
# joined via implicit string concatenation, byte-for-byte identical.
cookie = ('oschina_new_user=false; user_locale=zh-CN; remote_way=http; sensorsdata2015jssdkchannel={"prop":{"_sa_channel_landing_url":""}}; slide_id=9; tz=Asia/Shanghai; Hm_lvt_24f17767262929947cc3631f99bfd274=1669280716,1669563733,1669863553,1669908684; Hm_lvt_6bc840df1e0b2cbbd5d0aab3e06b2610=1668419073,1668499284,1669863563,1669908706; Hm_lpvt_6bc840df1e0b2cbbd5d0aab3e06b2610=1669908803; gitee_user=true; sensorsdata2015jssdkcross={"distinct_id":"8989432","first_id":"184ce505bf0160-0734f0329161f9c-7d5d5476-1327104-184ce505bf11140","props":{"$latest_traffic_source_type":"直接流量","$latest_search_keyword":"未取到值_直接打开","$latest_referrer":"","$latest_utm_source":"pzpc","$latest_utm_medium":"sem","$latest_utm_campaign":"home","$latest_utm_content":"competition","$latest_utm_term":"github"},"$device_id":"178de8f7582332-0d29e0f040ed83-7d667961-1327104-178de8f7583606","identities":"eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTg0NjZiOTNmODVmMTgtMDkxMjExNDcwMTc5OTMtN2Q1ZDU0NzQtMTMyNzEwNC0xODQ2NmI5M2Y4NmFhZSIsIiRpZGVudGl0eV9sb2dpbl9pZCI6Ijg5ODk0MzIifQ==","history_login_id":{"name":"$identity_login_id","value":"8989432"}}; Hm_lpvt_24f17767262929947cc3631f99bfd274=1669911412; '
          'gitee-session-n=VVVVdld0bzNCNVAzVktSaUxCbHJwUXVGVFZQUzdUVVdaRVVhREVxTjZLL2JaSG1TY3FhMjVvNks3NUFmRFZSbUppaWRrYXhMc3RwM0pVMkFDZFNqWTU0UHJabmwwYVhzWHhTZWlWSlhpK25CVUhncDJPdkY4NUxMOU9rMnNSRlgzT0pkNEF1aWowajY5R3JqUEFOVzNwNVREaFhGU1RQMXE1SXk2SGE2OFYzWDhtU0VTYzhkcjVHd080enl4RFZpTHozYnQ3Zy9YbTNkZ0M1MkVsSkJiazQwU3JWK2VPeWNZMW5lcGwwZS90SUdrZUNwUkhCcXhFQ1RhSEt3aWpiVFFsQ1RVMWdLNVJ2amJNQXdyWkx3R0J4ZmxoWDIzU0FIVUFrYnBaQzBPem1WRGFCZXNZM3B3WWQ2ODlDSytmc2lxdHg4ci9yVVh5YTgwZkdSb1lqalVoQUFMZXB4VVdQVHNib0RUOVRYWWlmaUJVWWJPSjJLbnRYUCsxaG1QMlpyNmVtdUhSY3pDN3ByZlVvUUF5OVFPc25pYUpLemxQQUZEYU1lWVd4RkdZR0hub2VlcHIwTXB5aVdGeXJuSGZWQmtVdjRRQnp5K0o4dXpWL1ZDdWpBUGp5cEFXMXgwbzN1eEdMMTZrVkdYMGdJYUpydDRkU01qbmxYVWNndzFNUEo5cy9qNEQ0c0ZldTBEOEhHdGV6RWNnakpGeXlNZy9JOFNkWDV4cFN5R2U5dkx2OE9EOWVFTHZtNkxCdm9ERXliZXp5eUFYTU9qVkFqa1FjOWsvdG9GNnZMOHNzaHV1bE9hWFFPaHlxNU0vWFAyUVdCWlJocWkzOFpETW5aY2JtRUg3OFRVazhYclBWM2J3U0tJUmdxK1lQUFNmRGhMblc0UDhOVm1CYlhNV2xLRFZBSjh4dEt6UG1YWENwUjR0WXpqeUM1UDU3SkduLytGWHo5OXdKSXJBPT0tLXFVZ1pRYW0xaUdxQ0RFc1hqKzlxZVE9PQ==--cf16005b4fb82311870b26c5e018f0d3cce59557')
# requests accepts bytes header values; the cookie is kept as utf-8 bytes
# as in the original code.
cookie = cookie.encode('utf-8')
# Request headers shared by every crawl below.
headers = {
    'User-Agent': random.choice(user_agent_list),
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Cookie' : cookie
}

#获取软件包仓库中包的基本信息（包括包名，简介，仓库是否开放）  参数是要爬取的页数（完整为450页）
def crawl_package_info(page):
    """Crawl basic package info from the src-openeuler organisation listing
    and write it to package.csv.

    For each package the CSV row is [name, open/close flag, description].

    Args:
        page: number of listing pages to crawl (the full listing is ~450 pages).
    """
    # Build one search URL per listing page; the query parameters mirror the
    # site's own AJAX request for the search page.
    urls = []
    for nowpage in range(1, page + 1):
        params = {
            '_': '1669908686206',
            'page': nowpage,
            'sort': 'name',
        }
        urls.append('https://gitee.com/organizations/src-openeuler/projects?' + urlencode(params))

    all_package_name = []
    all_package_description = []
    all_package_close = []
    for i, url in enumerate(urls):
        # timeout guards against hanging forever on a stalled connection
        response = requests.get(url, headers=headers, timeout=30)
        soup = BeautifulSoup(response.text, 'lxml')
        # One <span class="project-title"> per package entry.
        package_name = soup.select('div.project-list-item > h4 > span > span.project-title')
        # Package description (may be empty).
        package_imf = soup.select('div.project-list-item > p.description')

        for item in package_name:
            # The <a class="repository"> href carries the repo path.
            repo_links = item.select('a.repository')
            all_package_name.append(repo_links[0].get('href'))
            # A nested <div class="ui label basic mini red ml-1"> marks a
            # closed repository; its absence means the repo is open.
            if item.select('div'):
                all_package_close.append('close')
            else:
                all_package_close.append('open')
        for item in package_imf:
            all_package_description.append(str(item.string).replace('\n', ''))

        print('yes ' + str(i))
    print(all_package_name)

    # href looks like '/src-openeuler/<name>'; drop the 15-char prefix to
    # keep only the bare package name.
    all_package = [
        [name[15:], closed, description]
        for name, closed, description in zip(all_package_name,
                                             all_package_close,
                                             all_package_description)
    ]

    with open('package.csv', 'w', encoding='UTF8', newline='') as f:
        writer = csv.writer(f)
        # write the data
        writer.writerows(all_package)



#通过包名爬取openEuler软件包仓库的配置文件信息（主要爬取了 BuildRequires信息和 Requires信息以及包的version和release），传入参数是data包的名称
def crawl_package_spec(data):
    """Crawl each package's RPM .spec file from its openEuler repository and
    extract BuildRequires, Requires, Version and Release, writing the result
    to spec.csv.

    Args:
        data: iterable of package (repository) names.
    """
    urls = []
    package = []
    for item in data:
        urls.append('https://gitee.com/src-openeuler/' + item + '/raw/master/' + item + '.spec')
        package.append(item)  # package name

    # A few repositories keep their spec file under a different name than the
    # repository itself; patch those URLs in. Guarded by a bounds check: the
    # original assigned urls[2..4] unconditionally and raised IndexError
    # whenever fewer than 5 packages were passed in.
    special_urls = {
        2: 'https://gitee.com/src-openeuler/A-Tune/raw/master/atune.spec',
        3: 'https://gitee.com/src-openeuler/A-Tune-BPF-Collection/raw/master/atune_bpf_collection.spec',
        4: 'https://gitee.com/src-openeuler/A-Tune-BPF-Collection/raw/master/atune_bpf_collection.spec',
    }
    for idx, special in special_urls.items():
        if idx < len(urls):
            urls[idx] = special

    BuildRequires = []
    Requires = []
    Versions = []
    Releases = []

    for j, specurl in enumerate(urls):
        response = requests.get(specurl, headers=headers, timeout=30)
        lines = response.text.split('\n')

        # Accumulate every BuildRequires/Requires value (text after the first
        # ':') into one space-separated string per package.
        nowbr = ""
        nowr = ""
        for line in lines:
            if line.startswith("BuildRequires"):
                parts = line.split(":", 1)
                if len(parts) == 1:  # malformed line with no ':', skip it
                    continue
                nowbr += parts[1].strip() + " "
            elif line.startswith("Requires"):
                parts = line.split(":", 1)
                if len(parts) == 1:
                    continue
                nowr += parts[1].strip() + " "
        BuildRequires.append(nowbr)
        Requires.append(nowr)

        version = ""
        release = ""
        for line in lines:
            if version != "" and release != "":
                break  # both found, no need to scan further
            if line.startswith("Version"):
                parts = line.split(":", 1)
                # guard the split: the original indexed [1] unconditionally
                # and crashed on a 'Version' line without a colon
                if len(parts) > 1:
                    version = parts[1].strip()
            elif line.startswith("Release"):
                parts = line.split(":", 1)
                if len(parts) > 1:
                    release = parts[1].strip()
        Versions.append(version)
        Releases.append(release)
        print("now" + str(j))

    all_package = [
        [name, version, release, br, r]
        for name, version, release, br, r in zip(package, Versions, Releases,
                                                 BuildRequires, Requires)
    ]

    with open('spec.csv', 'w', encoding='UTF8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["package", "Version", "Release", "BuildRequires", "Requires"])
        # write the data
        writer.writerows(all_package)