#!/usr/bin/php
<?php
//HSp33der Web Crawler 5.6
//<-Harvie 2oo7/8
/*
 * Description:
 * This script crawls the web and prints found URLs to STDOUT.
 *
 * Installation:
 * - Debian:
 *   # apt-get install php5-cli php5-curl curl   (sort is part of coreutils)
 *   $ chmod +x crawler.php
 *   $ ./crawler.php (or $ php crawler.php)
 * - Windows:
 *   c:\> path\to\php5\php.exe crawler.php
 *   (you can add php.exe to PATH or associate .php files with it)
 *
 * Notes:
 * - You can use EtherApe (or a similar sniffer) to get a graphical
 *   illustration of the connections (a spider with your machine in the center).
 * - There are a few bugs in PHP5, so this crawler may freeze after
 *   a few minutes; as a workaround, enable the $use_curlbin option
 *   (it needs the external cURL binary).
 *   Check this for more info: http://bugs.php.net/bug.php?id=43098
 *
 * Tips & Tricks:
 * - Make found URLs 100% unique:
 *   cat urls.txt | sort -u > uurls.txt && mv -f uurls.txt urls.txt
 * - Show filesize and total URL count:
 *   du -h urls.txt && cat urls.txt | wc -l
 * - Open another output pipe:
 *   tail -f urls.txt
 * - Use some plugin (filter) to extract emails, etc.
 *   (a sketch of such a filter follows right after this list):
 *   tail -f urls.txt | ./example_mail_extractor.php
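 *
 *   example_mail_extractor.php is not part of this file, so the following
 *   is only a minimal sketch of how such a filter could look: it reads the
 *   piped lines from STDIN and prints anything that looks like an e-mail address.
 *     #!/usr/bin/php
 *     <?php
 *     //Read the piped stream line by line
 *     while(!feof(STDIN)) {
 *         $line = fgets(STDIN);
 *         //Print every e-mail-like token found on the current line
 *         if(preg_match_all('/[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,4}/i', $line, $m))
 *             foreach($m[0] as $mail) echo($mail."\n");
 *     }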
 *
 * Special thx2:
 * - PHP, cURL (technology)
 * - root.cz & php.net (help)
 * - Linus Torvalds (because I am crawling from Linux)
 */

///SETTINGS//////////////////
//Basic
$seed = 'http://blog.harvie.cz/'; //Start crawling from this page
$file = 'urls.txt'; //File to save crawled URLs (use sort to make URLs 100% unique)
$delete_url_db = false; //Delete file with saved URLs before crawling
$restore = true; //Use last URL from $file instead of seed
//Advanced
$max_size = 10000; //How many bytes to download from each page?
$max_urls = 30; //Size of URL buffer (prebuffered URLs to crawl; if full, no more URLs will be added to the queue)
$buffer_increase = true; //Increase buffer at buffer underrun?
$random_url = false; //Select random URLs from each page? (this may bypass the URL filter)
$history_max = 1000; //How many last URLs to keep in history? (URLs in history will not be crawled again)
//Curl binary plugin
$use_curlbin = true; //Use interface to the cURL executable instead of file_get_contents() (more speed, fewer crashes)
$curlpath = 'curl'; //Path to cURL executable binary (you can download it from http://curl.haxx.se/)
//Sort binary plugin
$use_sortbin = true; //Use sort to make URLs unique?
$sortpath = 'sort'; //Path to SORT executable binary (get it from your OS or UnxUtils for Windows)
$sort_each = $history_max; //Sort file after X new URLs
//System/PHP
$socket_timeout = 1; //How long to wait for a webserver? (seconds)
$time_limit = 0; //How long to run? (seconds; 0 == infinite)
$memory_limit = '128M'; //Maximum memory to eat (if exhausted, crawler will stop)
//Debug
$debug = true; //Use debugging mode? (print errors and statistics)

//Filter
$eregi_url_blacklist = array( //Most important thing when crawling ;D
	'(W3\.org|W3C)', //Hell knows...
	'(shop|xxx|porn|lesbian|hot)', //Commercial sites
	'(google|209.85.135.104|yahoo.com|amazon.com|youtube.com)', //Big sites
	'(seznam.cz|centrum.cz|atlas.cz|zoznam.sk|quick.cz)', //Big local sites
	'.\.(css|ico|gif|jpg|png|bmp|cgi|js|vbs)', //Misc. webpage content
	'.\.(avi|mpg|mpeg|mov|wmv|wm|mp3|ogg|wma)', //Multimedia files
	'.\.(pdf|swf|flv|cfm)', //Other documents and rich content
	'.\.(exe|zip|rar|gz|bz|bz2|tar)' //Big and binary files
);
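//Example (not part of the original filter): to also skip e.g. Wikipedia and
//any .gov site, you could append a pattern like '(wikipedia\.org|\.gov)' above.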


///FUNCTIONS/////////////////
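//Deduplicate $file by shelling out to `sort -u` through a temporary file;
//the original file is kept if sorting produced a suspiciously small result.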
function shell_sort_file($file, $tmpfile='.tmp') {
	$tmpfile = $file.$tmpfile;
	if($GLOBALS['debug']) echo("-SORTing $file (using tempfile $tmpfile)\n");
	@unlink($tmpfile);
	system($GLOBALS['sortpath']." -u $file > $tmpfile");
	if(is_file($tmpfile) && (filesize($tmpfile) > 3)) {
		unlink($file);
		rename($tmpfile, $file);
	} else {
		if($GLOBALS['debug']) echo("-SORTing failed!!!\n");
	}
	if($GLOBALS['debug']) echo("-SORTing done!\n");
}

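//Fetch a page by piping the output of the curl binary; reads at most
//$maxdata bytes and gives up on the connection after $timeout seconds.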
function shell_curl_get($url, $timeout=0, $maxdata=0) { //I made this cURL interface because file_get_contents() absolutely sux
	$curl = 'curl';
	if(isset($GLOBALS['curlpath'])) $curl = $GLOBALS['curlpath'];
	$args = "--get --location --connect-timeout $timeout --max-time $timeout --compressed --stderr curl.debug";
	$url = escapeshellarg(trim($url)); //escapeshellarg() quotes the URL safely for the shell
	$cmd = "$curl --url $url $args";
	if($GLOBALS['debug']) echo("-Ex: $cmd\n");
	$fp = popen($cmd, 'r');
	stream_set_timeout($fp, $timeout);
	stream_set_blocking($fp, 0); //non-blocking, so the loop below can enforce $maxdata
	//if($timeout>0) sleep($timeout);
	$data = '';
	while(!feof($fp)) {
		$in = fgets($fp, 100);
		$data .= $in;
		if($maxdata>0 && strlen($data)>=$maxdata) break; //stop after $maxdata bytes
		if($in == '') usleep(1000000/8); //nothing to read yet, back off for 1/8 s
	}
	@pclose($fp); //popen() handles must be closed with pclose()
	return $data;
}

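//Returns 1 only if the URL matches no blacklist pattern, is not already
//queued in $urls and its query-less form is not in the recent history.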
function check_crawl_url($url) { //Use this function to determine if url is wanted
	foreach($GLOBALS['eregi_url_blacklist'] as $black_url) {
		if(eregi($black_url, $url)) return(0);
	}
	if(in_array($url, $GLOBALS['urls'])) return(0);
	$file_url = explode('?', $url, 2); $file_url = $file_url[0];
	if(in_array($file_url, $GLOBALS['history'])) return(0);
	return(1);
}


///CODE//////////////////////
//Init
$urls = array(); //URL buffer
if($restore && is_file($file)) { //Load last URL from $file as seed
	$urls[1] = $seed; //original seed as backup (if no URLs found at restored page)
	if($debug) echo("!Restoring: ");
	$rest = fopen($file, 'r');
	$new = '';
	while(!feof($rest)) { //walk to the end of the file; $seed ends up as its last URL
		$seed = $new;
		$new = fgets($rest);
	}
	fclose($rest);
	$seed = trim($seed);
	if($debug) echo($seed."\n");
	$urls[0] = $seed;
	$seed = $urls[1];
}

if($delete_url_db) @unlink($file);
ini_set('default_socket_timeout', $socket_timeout);
set_time_limit($time_limit);
ini_set('memory_limit', $memory_limit);

//Loop
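//Each pass of the main loop: take one URL from the buffer, fetch up to
//$max_size bytes of it, extract absolute http:// links and append every
//link that passes check_crawl_url() to $file and (if there is room) to the buffer.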
if($debug) { $u = 0; $c = 0; $t = time(); }
$history[0] = '';
$sort_next = $sort_each;
$sorted_db = 0;
$fp = fopen($file, 'a+');
while(true) { //MAIN L00P
	if(sizeof($urls) <= 0) { //buffer underrun - fall back to the seed
		$urls = array($seed);
		if($buffer_increase) $max_urls++; //Increase size of buffer by one
		if($debug) echo("!Buffer underrun! !Buffer size is: $max_urls!\n");
	}

	//Get URL from buffer, download a few bytes and write found URLs to an array:
	$url = array_shift($urls);
	if($debug) echo("-Parsing: $url\n");
	if(!$use_curlbin) { $page = @file_get_contents($url, false, null, 0, $max_size); }
	else { $page = shell_curl_get($url, $socket_timeout, $max_size); }
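	//The pattern below only matches absolute http:// URLs (no https and no relative
	//links): hostname, dot, 2-4 letter TLD, slash, then a restricted set of path characters.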
	preg_match_all('(http:\/\/[_a-zA-Z0-9\.\-]+\.[a-zA-Z]{2,4}\/{1}[-_~&=\ ?\.a-z0-9\/]*)', htmlspecialchars_decode($page), $new_urls);
	$new_urls = $new_urls[0];

	foreach($new_urls as $new_url) { //Process grabbed URLs
		if($debug) $c++; ///Debug
		if(check_crawl_url($new_url)) {
			echo($new_url."\n");
			fwrite($fp, $new_url."\n");

			if($use_sortbin) { //Handle sorting (unique URLs)
				$sort_next--;
				if($sort_next <= 0) {
					fclose($fp);
					shell_sort_file($file);
					if($debug) { //count lines of the freshly sorted file
						$sorted_db = 0;
						$fp = fopen($file, 'r');
						while(!feof($fp)) {
							fgets($fp);
							$sorted_db++;
						}
						fclose($fp);
					}
					$fp = fopen($file, 'a+');
					$sort_next = $sort_each;
				}
			}

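			//Remember only the part before '?' so the same page is not
			//re-crawled over and over with different query strings.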
			$file_url = explode('?', $new_url, 2); $file_url = $file_url[0];
			array_push($history, $file_url);
			while(sizeof($history) > $history_max) array_shift($history);
			if($random_url) { if(sizeof($urls) < $max_urls) array_push($urls, $new_urls[rand(0, sizeof($new_urls)-1)]); }
			else { if(sizeof($urls) < $max_urls) array_push($urls, $new_url); }
			if($debug) { ///Debug Block
				$u++;
				$sorted_db++;
				$uspeed = round($u/(time()-$t+1), 2);
				$cspeed = round($c/(time()-$t+1), 2);
				$time = round((time()-$t)/60, 1);
				$buffered = sizeof($urls);
				$historied = sizeof($history);
				$memory = round(memory_get_usage()/1000000, 2);
				echo("+$u ($sorted_db sorted) URLs; $c Downloaded; UpTime: $time mins; Buffered: $buffered URLs; History: $historied URLs; To sort: $sort_next URLs; Speed: $uspeed URLs/s, $cspeed Downloads/s; Memory used: $memory/$memory_limit\n");
			}
		}
	}
	$new_urls = ''; //Memory cleanup
}

##EOF