import requests
from bs4 import BeautifulSoup
import re
import lxml.html
import time

etree = lxml.html.etree

class ZhiCity:
    """Scraper for project listings on taskcity.com.

    Walks the paginated skills-project listing, follows each project
    card's detail link, and extracts the project name from the detail
    page's <h1> heading.
    """

    # Initializer: request headers and entry URLs.
    def __init__(self):
        # Browser-like headers so the site serves its normal HTML page.
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36',
            'Content-type': 'text/html',
            'charset': 'utf-8'}
        # Listing page; paginated via the "page" query parameter.
        self.url = "http://www.taskcity.com/projects/skills"
        # Detail-page URL of the project currently being processed;
        # set by get_project() before each second_grap() call.
        self.secondurl = ""

    # Fetch the detail page and extract the project name.
    def second_grap(self):
        """Fetch ``self.secondurl`` and return the project name.

        Returns:
            str | None: the stripped <h1> text, or None when the request
            fails (non-200) or the heading is not found.
        """
        response = requests.get(self.secondurl, params={}, headers=self.header)
        response.encoding = "utf-8"
        if response.status_code != 200:
            return None
        html = etree.HTML(response.text)
        namelist = html.xpath("/html/body/div[1]/div/div[2]/div[1]/h1/text()")
        # BUG FIX: the original did str(namelist)[10:-16], slicing the
        # *repr of the list*, which silently corrupts the name whenever
        # the repr layout differs. Use the first xpath match directly.
        if not namelist:
            return None
        return namelist[0].strip()

    # Walk the listing pages and collect project names.
    def get_project(self, pages=1):
        """Scrape *pages* listing pages and return the project names.

        Args:
            pages: number of listing pages to walk (default 1, matching
                the original hard-coded single page).

        Returns:
            list[str]: names collected from every project detail page
            that was reachable and parseable.
        """
        names = []
        for page in range(1, pages + 1):
            response = requests.get(self.url, params={"page": str(page)}, headers=self.header)
            response.encoding = "utf-8"
            if response.status_code != 200:
                # Skip unreachable listing pages instead of failing.
                continue
            html = etree.HTML(response.text)
            # Each listing page shows up to 10 project cards.
            for card in range(1, 11):
                hrefs = html.xpath(
                    "/html/body/div[1]/div[2]/div/div[1]/div[" + str(card) + "]/div/div[1]/h4/a/@href")
                if not hrefs:
                    # Fewer than 10 cards on this page.
                    continue
                # BUG FIX: original used str(els)[2:-2] (slicing the list
                # repr) to recover the href; take the xpath result directly.
                self.secondurl = "http://www.taskcity.com" + hrefs[0]
                # BUG FIX: original discarded second_grap()'s return value.
                name = self.second_grap()
                if name is not None:
                    names.append(name)
        return names

if __name__ == "__main__":
    # BUG FIX: original used `__name__ in "__main__"` — a substring test
    # that only worked by accident; equality is the correct guard.
    # Also renamed `cls` (conventionally the classmethod receiver) to a
    # descriptive name.
    scraper = ZhiCity()
    scraper.get_project()
