#!/usr/bin/python
# encoding: utf-8
import requests
import threading
from lxml import etree
import sys
import os
import datetime
import re
import random
import time

# Python 2 only: site.py removes sys.setdefaultencoding at startup, so the
# module must be reloaded to get it back.  Switching the interpreter-wide
# default encoding to UTF-8 lets the Chinese strings below mix freely with
# byte strings.  (This idiom does not exist in Python 3.)
reload(sys)

sys.setdefaultencoding('utf-8')




def getNewUrlList(zhi):
    """Scrape listing pages 1..zhi-1 and collect posting titles and links.

    zhi -- exclusive upper bound on the page number.

    Returns a dict with a single key 'name' mapping to a list of
    [title, href] pairs, one per <article> found on each page.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'}
    count = 1
    dic = {'name': []}

    while count < zhi:
        # BUG FIX: the original appended str(count) to a URL that already
        # ended in "page=1&ka=page-1", producing "...ka=page-1<count>" and
        # never actually paging.  Build both query parameters from count.
        page_url = ("https://www.zhipin.com/c101010100-p100103/?page=%d&ka=page-%d"
                    % (count, count))
        response = requests.get(page_url, headers=header)
        html = response.content.decode("utf8")
        selector = etree.HTML(html)
        contents = selector.xpath('//main[@id="main"]/article')

        for eachlink in contents:
            # Each <article> holds one posting: its link and title live in
            # header/h2/a.  (No longer writes the module-level `url` global.)
            href = eachlink.xpath('header/h2/a/@href')[0]
            title = eachlink.xpath('header/h2/a/text()')[0]
            dic['name'].append([title, href])

        count = count + 1
    return dic


# def getNewContent(urlList):
#     for item in urlList:



def test(posturl):
    """Fetch each posting page and dump its article text to disk.

    posturl -- iterable of [title, url] pairs (as built by getNewUrlList);
               only the url (item[1]) is used.

    Creates ./file/<timestamp>/ if needed and writes one
    <random><epoch>.txt file per page containing the text of the page's
    entry-content div.
    """
    dir_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    out_dir = './file/' + dir_name

    if os.path.exists(out_dir):
        print("在的")
    else:
        print("不在")
        os.mkdir(out_dir)

    # Hoisted out of the loop: the header never changes between requests.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.115 Safari/537.36'}

    for item in posturl:
        response = requests.get(item[1], headers=header)
        html = response.content.decode("utf8")
        selector = etree.HTML(html)
        bloger = selector.xpath("//div[@class='entry-content']")
        # BUG FIX: the original indexed bloger[0] unconditionally and died
        # with IndexError on any page lacking an entry-content div; skip
        # such pages instead of aborting the whole run.
        if not bloger:
            continue
        context = bloger[0].xpath('string(.)').strip()
        print(context)

        # Filename: two random 6-digit numbers plus epoch seconds -- unique
        # enough for this crawl's purposes.
        last = "%d%d%d" % (random.randint(100000, 900000),
                           random.randint(100000, 900000),
                           int(time.time()))
        txtName = out_dir + "/" + last + ".txt"
        # file() is Python-2-only; open() works on both interpreters and the
        # with-block guarantees the handle is closed even if write() raises.
        with open(txtName, "a+") as f:
            f.write(context)


def validateTitle(title):
    """Return *title* with characters illegal in file names removed.

    Strips / \\ : * ? " < > | plus the CJK enumeration comma and '.'.
    """
    illegal = re.compile(r"[\/\\\:\*\?\"\<\>\|\、\.]")
    return illegal.sub("", title)

def go(zhi, start=400):
    """Crawl 51job Java listings, pages start..zhi-1.

    zhi   -- exclusive upper bound on the page number.
    start -- first page to fetch (default 400, the original hard-coded value).

    Appends one record per posting (name/company/city/salary) to
    ./file/51jb.txt and logs every page URL fetched to ./file/xin_51jb.txt.
    Sleeps 1-5 s between pages to throttle the crawl.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    count = start
    while count < zhi:
        url = ("https://search.51job.com/list/000000,000000,0000,00,9,99,java,2,"
               + str(count)
               + ".html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=")
        print("第" + str(count) + "页了")
        # file() is Python-2-only; open() + with works on both interpreters.
        # Newline appended so the logged URLs do not run together (bug fix).
        with open("./file/xin_51jb.txt", "a+") as f:
            f.write(url + "\n")

        response = requests.get(url, headers=header)
        html = response.content.decode("gbk")  # 51job serves GBK, not UTF-8
        selector = etree.HTML(html)
        contents = selector.xpath('//div[@class="dw_table"]/div[@class="el"]')
        print(len(contents))

        for eachlink in contents:
            company = eachlink.xpath('span[@class="t2"]/a/text()')[0]
            name = eachlink.xpath('p/span/a/text()')[0]
            city = eachlink.xpath('span[@class="t3"]/text()')[0]
            # The salary cell (t4) is sometimes absent; fall back to "0".
            key_texts = eachlink.xpath('span[@class="t4"]/text()')
            key = key_texts[0] if key_texts else "0"
            company = company.replace(' ', '')
            name = name.replace(' ', '')
            city = city.replace(' ', '')

            # BUG FIX: the original stored this record in a local named
            # `zhi`, clobbering the page-bound parameter -- in Python 2 the
            # `count < zhi` test (int < str) then stayed True forever.
            # A newline is also appended so records stay one-per-line.
            record = (name + "=============" + company + "--------" + city
                      + "------" + str(key))
            with open("./file/51jb.txt", "a+") as f:
                f.write(record + "\n")

        sui = random.randint(1, 5)
        print("休眠" + str(sui))
        time.sleep(sui)
        count = count + 1
                # dic['name'].append([title,url])
def log(context):
    """Append one line to ./log/log.txt (the ./log directory must exist).

    context -- message text; a trailing newline is added automatically.
    """
    # file() is Python-2-only; open() works on both interpreters and the
    # with-block closes the handle even if the write raises.
    with open("./log/log.txt", "a+") as f:
        f.write(context + "\n")



def xin(zhi=523):
    """Crawl 51job PHP listings, pages 1..zhi-1.

    zhi -- exclusive upper bound on the page number (default 523, the
           original hard-coded value, so existing callers are unaffected).

    Appends one record per posting (name/company/city/salary/link) to
    ./file/php.txt; progress and sleep intervals go through log().
    Sleeps 1-2 s between pages to throttle the crawl.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    count = 1
    while count < zhi:
        url = ("https://search.51job.com/list/000000,000000,0000,00,9,99,php,2,"
               + str(count)
               + ".html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=")

        response = requests.get(url, headers=header)
        html = response.content.decode("gbk")  # 51job serves GBK, not UTF-8
        print(html)
        selector = etree.HTML(html)
        contents = selector.xpath('//div[@class="dw_table"]/div[@class="el"]')

        log("第" + str(count) + "页了--" + str(len(contents)) + "条数据")
        for eachlink in contents:
            company = eachlink.xpath('span[@class="t2"]/a/text()')[0]
            # Renamed from `url` to avoid shadowing the page URL above.
            href = eachlink.xpath('p/span/a/@href')[0]
            name = eachlink.xpath('p/span/a/text()')[0]
            city = eachlink.xpath('span[@class="t3"]/text()')[0]
            # The salary cell (t4) is sometimes absent; fall back to "0".
            key_texts = eachlink.xpath('span[@class="t4"]/text()')
            key = key_texts[0] if key_texts else "0"
            company = company.replace(' ', '')
            name = name.replace(' ', '')
            city = city.replace(' ', '')

            record = (name + "=============" + company + "--------" + city
                      + "------" + str(key) + "---------------" + href)
            # file() is Python-2-only; open() + with works on both
            # interpreters.  Newline appended so records do not run
            # together in the output file (bug fix).
            with open("./file/php.txt", "a+") as f:
                f.write(record + "\n")

        sui = random.randint(1, 2)
        log("休眠" + str(sui))
        time.sleep(sui)
        count = count + 1

if __name__=="__main__":
    # Entry point: crawl the 51job PHP listings page by page (see xin()).
    # The zhipin.com pipeline below is kept for reference but disabled.
    xin()
    # info= getNewUrlList(94);  
    # test(info['name']) 
   