# Scrapes video listing pages from a bilibili-style site and records
# per-work statistics (clicks, comments, danmu, stows) for this scan run.
# Relies on project constants PROXY and BILI_PATH and on the Work/Result
# models being defined elsewhere in the app.
class Scan < ActiveRecord::Base
  has_many :results, :dependent => :destroy

  # Maximum number of concurrent page-fetching threads.
  MAX_FETCH_THREADS = 5
  # Give up on a URL after this many consecutive fetch failures
  # (the old code recursed on failure without any limit).
  MAX_FETCH_RETRIES = 100

  # Fetch every listing page for each of +ac_types+ and return an Array of
  # [ac_type_id, listing_node] pairs (one pair per "div.listpg" node).
  #
  # ac_types - records responding to #id, #page_name and #page_code
  # php      - when true, pages 2..n go through the list.php endpoint
  #            instead of the static "-N.html" pages
  def self.search ac_types, php=false
    require 'net/http/persistent' # lazy-loaded: only needed when scanning
    pages = []
    http = Net::HTTP::Persistent.new(nil, (PROXY ? URI.parse(PROXY) : nil))
    @protect_num = 0 # anti IP-ban countermeasure: request counter, see search_again_and_again
    ac_types.each do |ac_type|
      started_at = Time.now
      head_url = "http://#{BILI_PATH}/video/#{ac_type.page_name}"
      # Page 1 carries the total page count in its pageinfo span.
      first_page = search_again_and_again("#{head_url}-1.html", http)
      page_count = first_page.xpath(%<//span[@class='pageinfo']/strong[1]>).text.to_i
      first_page.css("div.listpg").each { |f| pages << [ac_type.id, f] }
      # Print progress in the console.
      puts "read #{ac_type.page_name} pages(#{page_count})."
      print "|"
      # SizedQueue doubles as a counting semaphore: #push blocks once
      # MAX_FETCH_THREADS fetches are in flight.  This replaces the old
      # busy-wait loop around an unsynchronized integer counter.
      slots = SizedQueue.new(MAX_FETCH_THREADS)
      threads = ThreadGroup.new
      (2..page_count).each do |i|
        tmp_url = php ? "http://#{BILI_PATH}/plus/list.php?tid=#{ac_type.page_code}&PageNo=#{i}" : "#{head_url}-#{i}.html"
        slots.push(true) # blocks until a worker slot is free
        threads.add(Thread.new(i, tmp_url) { |page_no, page_url|
          begin
            list_again_and_again(page_url, http).each { |t| pages << [ac_type.id, t] }
            # Progress ticker: "|" every 50 pages, "." every other page.
            print((page_no % 50).zero? ? "|" : ((page_no % 2).zero? ? "." : ""))
          ensure
            slots.pop # release the slot even if the fetch raised
          end
        })
      end
      # Wait for all workers, evaluating the alive list once per second
      # (the old loop evaluated it three times per iteration).
      loop do
        alive = threads.list.count { |th| th.alive? }
        break if alive.zero?
        print %(#{alive}waits...   ) if alive > 1
        sleep 1
      end
      print "#{Time.now - started_at} second complete!\n"
    end
    pages
  end

  # Fetch one listing page for +ac_type+ and return the parsed document.
  def self.search_single ac_type, page_num
    require 'net/http/persistent' # lazy-loaded, as in .search
    @protect_num = 0
    http = Net::HTTP::Persistent.new(nil, (PROXY ? URI.parse(PROXY) : nil))
    head_url = "http://#{BILI_PATH}/video/#{ac_type.page_name}"
    search_again_and_again("#{head_url}-#{page_num}.html", http)
  end

  # Convenience wrapper: fetch one page and store its stats under this scan.
  def search_single_and_add ac_type, page_num
    add_results Scan.search_single(ac_type, page_num)
  end

  # Create (or, when +work_update+ is true, refresh) the Work row described
  # by one listing node.  Returns the Work, or nil when creation failed
  # (the old rescue-modifier returned the log *string* on failure).
  def self.work_info_fixed listpg, ac_type_id, work_update=false
    title_a = listpg.css("a.title[1]").first
    wid = Scan.get_wid title_a
    name = title_a.text
    cdate = listpg.css("div.date").text
    author_name = listpg.search("div[@class='upzhu']/b").text
    pic_path = listpg.search("img").first.get_attribute("src")
    work = Work.find_by_wid wid
    if work
      work.update_attributes(:cdate => cdate,
                             :author_name => author_name,
                             :ac_type_id => ac_type_id,
                             :pic_path => pic_path) if work_update
    else
      begin
        work = Work.create! :name => name,
                            :wid => wid,
                            :cdate => cdate,
                            :ac_type_id => ac_type_id,
                            :author_name => author_name,
                            :pic_path => pic_path
      rescue StandardError
        p "#{wid} can't create"
        work = nil # callers get a Work or nil, never a String
      end
    end
    work
  end

  # Store one Result row for the work shown in +listpg+, attached to this
  # scan.  Listings whose work is not yet in the DB are silently skipped.
  def add_results listpg
    title_a = listpg.css("a.title[1]").first
    work = Work.find_by_wid(Scan.get_wid(title_a))
    return unless work
    # Site layout: the dinfo <b> tags are ordered clicks, danmu,
    # comments, stows — hence the index shuffle below.
    data_div = listpg.search("div[@class='dinfo']/b")
    Result.create! :work_id => work.id,
                   :scan_id => self.id,
                   :clicks => data_div[0].text,
                   :comments => data_div[2].text,
                   :danmu => data_div[1].text,
                   :stows => data_div[3].text
  end

  # GET +url+ through the persistent connection and return a Nokogiri
  # document.  Any StandardError (timeouts included) triggers a bounded
  # retry via `retry` — the old version recursed without limit, which
  # grew the stack on every failure.  +count+ keeps the public signature
  # and counts failures so far.
  def self.search_again_and_again url, http, count = 0
    begin
      print "again~#{count}" unless count == 0
      # Anti IP-ban countermeasure: pause briefly every ~188 requests.
      # NOTE(review): @protect_num is touched from several threads without
      # a lock; under MRI's GVL a lost increment only shifts the pause, so
      # this heuristic is left unsynchronized on purpose.
      if @protect_num > 188
        sleep 0.6
        @protect_num = 0
      end
      re = Timeout.timeout(10, ThreadError) { http.request(URI.parse(url)) }
      @protect_num += 1
      Nokogiri::HTML(re.body, nil, 'utf-8')
    rescue StandardError
      raise if count >= MAX_FETCH_RETRIES # give up instead of looping forever
      count += 1
      retry
    end
  end

  # Fetch a listing page and return its "div.listpg" nodes.  An empty
  # result usually means the site served an interstitial/ban page, so the
  # fetch is retried — unless it is the genuine "no permission" page
  # (matched against the site's literal Chinese error markup).
  def self.list_again_and_again url, http
    tmp_page = search_again_and_again(url, http)
    tmp_lists = tmp_page.css("div.listpg")
    if tmp_lists.empty?
      unless tmp_page.to_s.include?("您<font color=\"#red\">无权访问</font>本页面")
        p tmp_page.to_s # surface the unexpected page body for debugging
        tmp_lists = list_again_and_again url, http
      end
    end
    tmp_lists
  end

  # Extract the work id from a title link.  ".../av12345"-style hrefs give
  # an Integer; legacy "...id=xyz" hrefs fall back to the raw trailing
  # String (so callers may receive either type, as before).
  def self.get_wid title_a
    href = title_a.get_attribute("href")
    wid = href.split('/av')[1].to_i
    wid = href.split('id=').last if wid.zero?
    wid
  end
end
