require 'net/http'
require 'json'
# Perform a blocking HTTP GET and return the response body as a String.
#
# url - full URL string, e.g. "http://example.com/index.html".
#
# NOTE(review): no error handling — DNS/connection failures raise.
def query_url(url)
  # Implicit return; the original's explicit `return` + semicolon were
  # non-idiomatic Ruby.
  Net::HTTP.get(URI.parse(url))
end

# Download +url+ and save it under +dir+ as +filename+.
#
# url      - URL to fetch.
# dir      - target directory; created if missing. NOTE(review): the output
#            path is built by plain concatenation ("#{dir}#{filename}"), so
#            dir is expected to end with a separator — confirm with callers.
# filename - output file name; when nil/empty it is derived from the last
#            path segment of the URL.
def save_url(url, dir, filename)
  filename = url[(url.rindex('/') + 1)..-1] if filename.nil? || filename.empty?
  require 'open-uri' # lazy-loaded: only needed when actually downloading
  Dir.mkdir(dir) if dir && !dir.empty? && !FileTest.exist?(dir)
  # URI.open replaces Kernel#open(url): URL support in Kernel#open was
  # deprecated in Ruby 2.7 and removed in 3.0.
  URI.open(url) do |fin|
    # File.open in "wb" creates/truncates the file itself, so the original
    # separate File.new(...).close was redundant; the dead `if true` wrapper
    # and the per-chunk STDOUT.flush (stdout is never written here) are gone.
    File.open("#{dir}#{filename}", 'wb') do |fout|
      while (buf = fin.read(1024))
        fout.write buf
      end
    end
  end
end
# Scratch helper: yields a fixed sample string.
def justTest
  'xxxkkkkkkkkxxbc'
end


# Scratch helper: yields another fixed sample string.
def justTest2
  '11ff23ee3'
end

# Save an image from a web server to a local file. Simple example.
#
# sUrl          - host name only (passed to Net::HTTP.start), NOT a full URL.
# sSaveFilePath - local path the image body is written to.
# sImgPath      - request path of the image on the host; new defaulted
#                 parameter generalizing the previously hard-coded
#                 "/img/bdlogo.gif" (backward compatible).
def save_pic_toFile(sUrl = "www.baidu.com", sSaveFilePath = "d:/test.png", sImgPath = "/img/bdlogo.gif")
  Net::HTTP.start(sUrl) do |http|
    resp = http.get(sImgPath)
    # File.open instead of Kernel#open: avoids the "|command" pipe behavior
    # and is unambiguous for local paths.
    File.open(sSaveFilePath, "wb") do |file|
      file.write(resp.body)
    end
  end
end

# HTTPS example: GET https://example.com/ and print the response body.
def httpsOpen()
  require 'net/https'
  uri = URI.parse('https://example.com/')
  session = Net::HTTP.new(uri.host, uri.port)
  session.use_ssl = true if uri.scheme == 'https'
  get_request = Net::HTTP::Get.new(uri.path)
  puts session.request(get_request).body
end

#
require 'rubygems'
require 'mechanize'

# Examples for poking at a Mechanize page object (kept for reference):
#puts page.search("div.mod").text
#puts page.class.instance_methods false
#puts page.links[1].class.instance_methods false
#page.links.each do |link|
#
#  puts "#{link.text} : #{link.href}  :#{link.uri}"
#end

# MD5 digest
require 'digest'
puts Digest::MD5.hexdigest('admin')
# SHA1 digest (Digest already loaded above)
puts Digest::SHA1.hexdigest('admin')
# Base64 encode/decode round-trip
require 'base64'
encoded = Base64.encode64('admin')
decoded = Base64.decode64(encoded)


# Write +content+ to +filename+ in binary mode, creating the file if it does
# not exist and replacing any previous contents.
#
# filename - path of the file to write (original interpolation was corrupted
#            to "#(unknown)"; reconstructed as the filename itself).
# content  - data written verbatim.
def save_to_file(filename, content)
  # File.open avoids Kernel#open's "|command" pipe behavior on untrusted
  # names; the block form guarantees the handle is closed. The original's
  # STDOUT.flush was a no-op here (nothing is written to stdout).
  File.open(filename, "wb") do |fout|
    fout.write content
  end
end

# Fetch a multi-part article from hgamecn.com, append every follow-up part's
# 'div#artcon' HTML into the first page's container, and save the merged
# document locally (with its stylesheet path rewritten to a local file).
#
# gameID - article id as a String, e.g. "1566". Must be a String: it is
#          concatenated into the URL and into the link filter below.
#
# NOTE(review): network-dependent and tied to the site's markup/pagination;
# no error handling if the page or 'div#artcon' is missing — verify live.
def fetch_hgamecn_page(gameID) #eg:gameID =1566
  #agent = Mechanize.new
  #page = agent.get(url)
  #htmlContent = Nokogiri::HTML(page.content)
  #r = htmlContent.search('div#artcon') # returns a node set
  url="http://www.hgamecn.com/htmldata/article/"+gameID+".html"
  agent = Mechanize.new
  page = agent.get(url)
  htmlContent = Nokogiri::HTML(page.content)
  part1_content= htmlContent.search('div#artcon') # returns a node set


  # Collect hyperlinks to the article's remaining parts: hrefs containing
  # "<gameID>_", excluding the "next page" navigation link (literal Chinese
  # link text below is site data — do not translate).
  other_part_links=page.links.select {|l| l.href.include?(gameID+"_") and l.text != "下一页>" unless l.href.nil?}

  other_part_links.each{|link|
    otherPage = link.click
    partN_content =otherPage.search("div#artcon") # returns a node set

    # Append this part's inner HTML onto the first part's container node.
    part1_content[0].inner_html+= partN_content[0].inner_html
  }

  # Save the merged document, pointing the stylesheet at a local copy.
  save_to_file(page.filename,htmlContent.to_html.sub("/css/hgc.css","hgc.css"))

  puts "已成功保存到#{page.filename}"

end


# Query the Douban v2 music API for the given id and return the parsed JSON.
#
# musicID - music id used to build the API URL; nil returns nil immediately.
def fetch_doubanMusicApi(musicID)
  return nil if musicID.nil?

  response = Mechanize.new.get("https://api.douban.com/v2/music/#{musicID}")
  JSON.parse(response.content)
end

#fetch_hgamecn_page("http://www.hgamecn.com/htmldata/article/1566.html")
# Interactive driver: read article ids from stdin until "exit" is entered.
if $0 == __FILE__
  puts "waiting... "
  loop do
    command = readline().chomp
    break if command == "exit"
    fetch_hgamecn_page(command)
    puts "waiting..."
  end
  puts "bye!"
else
  puts "--- success Load file #{__FILE__} in irb"
end



