michael@0: #!/usr/bin/perl michael@0: # michael@0: # This Source Code Form is subject to the terms of the Mozilla Public michael@0: # License, v. 2.0. If a copy of the MPL was not distributed with this michael@0: # file, You can obtain one at http://mozilla.org/MPL/2.0/. michael@0: michael@0: # This is a modified version of Chris Hofmann's michael@0: # infamous "browser buster" test harness. It's a bit simpler (CGI michael@0: # instead of using cookies; IFRAME instead of FRAMESET), and has some michael@0: # extra parameters that make it a bit easier to test with, but it's michael@0: # pretty faithful otherwise. michael@0: # michael@0: # It accepts a couple of parameters, including michael@0: # michael@0: # file= Set this to the name of the file containing michael@0: # the URLs that you want the buster to cycle through. This michael@0: # might be a security hole, so don't run this script on a michael@0: # server with s3kret stuff on it, mmkay? michael@0: # michael@0: # page= This is used to maintain state, and is the line michael@0: # number in the file that the buster will pull up in the michael@0: # IFRAME. Set if by hand if you need to for some reason. michael@0: # michael@0: # last= The buster will run until it's exhausted all michael@0: # the URLs in the file, or until it reaches this line in the michael@0: # file; e.g., setting it to "5" will load five URLs. michael@0: # michael@0: # refresh= The timeout (in seconds) to wait before doing michael@0: # a page refresh, and thus loading the next URL. Defaults to michael@0: # thirty. michael@0: michael@0: use CGI; michael@0: michael@0: # Find the page'th URL in the file with the specified name michael@0: sub FindURL($$) michael@0: { michael@0: my ($file, $page) = @_; michael@0: michael@0: open URLS, $file michael@0: || die("can't open $::File"); michael@0: michael@0: LINE: while () { michael@0: next LINE if /^#/; michael@0: last LINE unless --$page; michael@0: } michael@0: michael@0: close URLS; michael@0: michael@0: chomp; michael@0: return $_; michael@0: } michael@0: michael@0: # Scrape parameters michael@0: $::Query = new CGI; michael@0: michael@0: $::File = $::Query->param("file"); michael@0: $::File = "top100.txt" unless $::File; michael@0: michael@0: $::Page = $::Query->param("page"); michael@0: $::Page = 0 unless $::Page; michael@0: $::URL = FindURL($::File, ++$::Page); michael@0: michael@0: $::Last = $::Query->param("last"); michael@0: $::Last = -1 unless $::Last; michael@0: michael@0: $::Refresh = $::Query->param("refresh"); michael@0: $::Refresh = 30 unless $::Refresh; michael@0: michael@0: # Header michael@0: print qq{Content-type: text/html michael@0: michael@0: michael@0: michael@0: }; michael@0: michael@0: # Meat michael@0: if ($::URL && ($::Page <= $::Last || $::Last == -1)) { michael@0: # Make a web page that'll load $::URL in an IFRAME, with michael@0: # a meta-refresh that'll reload us again in short order. michael@0: print qq{ michael@0: michael@0: BrowserBuster II: $::URL michael@0: michael@0: michael@0: michael@0: michael@0: }; michael@0: print "$::File: $::URL"; michael@0: if ($::Last != -1) { michael@0: print " ($::Page of $::Last)
"; michael@0: } michael@0: print qq{ michael@0: