#!/usr/bin/php
<?php
//HSp33der Web Crawler 5.5
//<-Harvie 2oo7
/*
 * Description:
 * This script crawls the web and prints found URLs to STDOUT.
 *
 * Installation:
 * - Debian:
 *   # apt-get install php5-cli php5-curl curl
 *   $ chmod +x crawler.php
 *   $ ./crawler.php (or $ php crawler.php)
 * - Windows:
 *   c:/>path/to/php5/php.exe crawler.php
 *   (you can add php.exe to PATH or associate .php files with it)
 *
 * Notes:
 * - You can use EtherApe (or a similar sniffer) to get a graphical
 *   illustration of the connections (a spider with your machine in the center).
 * - There are a few bugs in PHP 5, so this crawler may freeze after a few
 *   minutes, but you can use the $use_curlbin option (needs some extra files).
 *   Check this for more info: http://bugs.php.net/bug.php?id=43098
 *
 * Tips & Tricks:
 * - Make found URLs 100% unique:
 *   cat urls.txt | sort -u > uurls.txt && mv -f uurls.txt urls.txt
 * - Show file size and total URL count:
 *   du -h urls.txt && cat urls.txt | wc -l
 * - Open another output pipe:
 *   tail -f urls.txt
 * - Use some plugin (filter) to extract e-mails, etc.:
 *   tail -f urls.txt | ./example_mail_extractor.php
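 *   (example_mail_extractor.php is not included here; a minimal hypothetical
 *    sketch of such a filter, reading URLs from STDIN, fetching each page and
 *    printing any e-mail addresses it finds, could look like:
 *      #!/usr/bin/php
 *      <?php
 *      while(($url = fgets(STDIN)) !== false) {
 *        $page = @file_get_contents(trim($url), false, null, 0, 10000);
 *        if(preg_match_all('/[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,4}/i', $page, $m))
 *          foreach(array_unique($m[0]) as $mail) echo($mail."\n");
 *      }
 *   )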
 *
 * Dedicated to: SooM.cz (This is a SooM project)
 * Special thx2:
 * - PHP, cURL (technology)
 * - root.cz & php.net (help)
 * - Linus Torvalds (because I am crawling from Linux)
 */

///SETTINGS//////////////////
//Basic
$seed = 'http://www.secunia.com/'; //Start crawling from this page
$file = 'urls.txt'; //File to save crawled URLs (use sort to make the URLs 100% unique)
$delete_url_db = false; //Delete the file with saved URLs before crawling
$restore = true; //Use the last URL from $file instead of the seed
//Advanced
$max_size = 10000; //How many bytes to download from each page
$max_urls = 30; //Size of the URL buffer (prebuffered URLs to crawl; if full, no more URLs are added to the queue)
$buffer_increase = false; //Increase the buffer size on buffer underrun?
$random_url = false; //Select random URLs from each page? (This may override the URL filter)
$history_max = 1000; //How many recent URLs to keep in history (URLs in history will not be crawled again)
//Curl binary plugin
$use_curlbin = false; //Use the interface to the cURL executable instead of file_get_contents() (more speed, fewer crashes)
$curlpath = 'curl'; //Path to the cURL executable binary (you can download it from http://curl.haxx.se/)
//System/PHP
$socket_timeout = 1; //How long to wait for a webserver (seconds)
$time_limit = 0; //How long to run (seconds; 0 == infinite)
$memory_limit = '128M'; //Maximum memory to use (if exhausted, the crawler will stop)
//Debug
$debug = true; //Use debugging mode? (Print errors and statistics)

//Filter
$eregi_url_blacklist = array( //Most important thing when crawling ;D
	'(W3\.org|W3C)', //Hell knows...
	'(shop|xxx|porn|lesbian|hot)', //Commercial sites
	'(google|209.85.135.104|yahoo.com|amazon.com|youtube.com)', //Big sites
	'(seznam.cz|centrum.cz|atlas.cz|zoznam.sk|quick.cz)', //Big local sites
	'.\.(css|ico|gif|jpg|png|bmp|cgi|js|vbs)', //Misc. webpage content
	'.\.(avi|mpg|mpeg|mov|wmv|wm|mp3|ogg|wma)', //Multimedia files
	'.\.(pdf|swf|flv|cfm)', //Other non-HTML files
	'.\.(exe|zip|rar|gz|bz|bz2|tar)' //Big and binary files
);
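//For example, 'http://www.google.com/search' is rejected by the big-sites rule above,
//'http://example.com/style.css' by the webpage-content rule, while an ordinary
//'http://example.com/index.html' URL passes the blacklist (see check_crawl_url() below).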


///FUNCTIONS/////////////////
function shell_curl_get($url, $timeout=0, $maxdata=0) { //Interface to the cURL binary, because file_get_contents() is unreliable here (see Notes above)
	$curl = 'curl';
	if(isset($GLOBALS['curlpath'])) $curl = $GLOBALS['curlpath'];
	$args = "--get --location --connect-timeout $timeout --max-time $timeout --compressed --stderr curl.debug";
	$url = escapeshellarg(trim($url)); //escapeshellarg() quotes the URL safely for the shell
	$cmd = "$curl --url $url $args";
	if($GLOBALS['debug']) echo("-Ex: $cmd\n");
	$fp = popen($cmd, 'r');
	stream_set_timeout($fp, $timeout);
	stream_set_blocking($fp, 0);
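	//The pipe is non-blocking, so fgets() below may return '' while curl is still
	//running; the loop therefore polls and sleeps 1/8 s whenever no data arrived.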
	//if($timeout>0) sleep($timeout);
	$data = '';
	while(!feof($fp)) {
		$in = fgets($fp, 100);
		$data .= $in;
		if($maxdata>0 && strlen($data)>=$maxdata) break;
		if($in == '') usleep(1000000/8);
	}
	@pclose($fp); //pclose() (not fclose()) is the right way to close a popen() handle
	return $data;
}

function check_crawl_url($url) { //Use this function to determine whether a URL is wanted
	foreach($GLOBALS['eregi_url_blacklist'] as $black_url) {
		if(eregi($black_url, $url)) return(0);
	}
	if(in_array($url, $GLOBALS['urls'])) return(0); //Already queued
	$file_url = explode('?', $url, 2); $file_url = $file_url[0]; //Drop the query string before the history check
	if(in_array($file_url, $GLOBALS['history'])) return(0); //Already crawled
	return(1);
}
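
/*
 * Note: eregi() is deprecated since PHP 5.3 and removed in PHP 7. If you need to run
 * this script on a newer PHP, a rough preg_match() based replacement for the blacklist
 * test (assuming none of the blacklist patterns contain a '/', so it is safe to use as
 * the delimiter) could look like this sketch:
 *
 *	function check_crawl_url_preg($url) {
 *		foreach($GLOBALS['eregi_url_blacklist'] as $black_url) {
 *			if(preg_match('/'.$black_url.'/i', $url)) return(0);
 *		}
 *		if(in_array($url, $GLOBALS['urls'])) return(0);
 *		$file_url = explode('?', $url, 2); $file_url = $file_url[0];
 *		if(in_array($file_url, $GLOBALS['history'])) return(0);
 *		return(1);
 *	}
 */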


///CODE//////////////////////
//Init
if($restore && is_file($file)) { //Load the last URL from $file as the seed
	$urls[1]=$seed; //Keep the original seed as a backup (used if no URLs are found on the restored page)
	if($debug) echo("!Restoring: ");
	$rest = fopen($file, 'r');
	$new = ''; //Initialized so the first "$seed = $new" below does not read an undefined variable
	while(!feof($rest)) { //After this loop, $seed holds the last complete line of the file
		$seed = $new;
		$new = fgets($rest);
	}
	fclose($rest);
	$seed = trim($seed);
	if($debug) echo($seed."\n");
	$urls[0]=$seed;
	$seed=$urls[1];
}

if($delete_url_db) @unlink($file);
ini_set('default_socket_timeout',$socket_timeout);
set_time_limit($time_limit);
ini_set('memory_limit', $memory_limit);

//Loop
if($debug) { $u = 0; $c = 0; $t = time(); }
$history[0] = '';
$fp = fopen($file, 'a+');
while(true) { //MAIN L00P
	if(sizeof($urls) <= 0) { //Buffer underrun: re-seed the queue
		$urls = array(); //Use a real array here (assigning an offset on an empty string is not reliable across PHP versions)
		$urls[0]=$seed;
		if($buffer_increase) $max_urls++; //Increase the buffer size by one
		if($debug) echo("!Buffer underrun! !Buffer size is: $max_urls!\n");
	}

	//Get a URL from the buffer, download a few bytes of it and collect the URLs found there:
	$url = array_shift($urls);
	if($debug) echo("-Parsing: $url\n");
	if(!$use_curlbin) { $page = @file_get_contents($url, false, null, 0, $max_size); }
	else { $page = shell_curl_get($url, $socket_timeout, $max_size); }
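	//The pattern below is a very loose absolute-URL matcher: 'http://' + hostname,
	//a 2-4 letter TLD and at least one '/', followed by lowercase path characters
	//(so https:// links are missed and paths are cut at the first character outside that set).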
	preg_match_all('(http:\/\/[_a-zA-Z0-9\.\-]+\.[a-zA-Z]{2,4}\/{1}[-_~&=\ ?\.a-z0-9\/]*)', htmlspecialchars_decode($page), $new_urls);
	$new_urls = $new_urls[0];

	foreach($new_urls as $new_url) { //Process the grabbed URLs
		if($debug) $c++; ///Debug
		if(check_crawl_url($new_url)) {
			echo($new_url."\n");
			fwrite($fp, $new_url."\n");
			$file_url = explode('?', $new_url, 2); $file_url = $file_url[0]; //Store the URL without its query string in history
			array_push($history, $file_url);
			while(sizeof($history) > $history_max) @array_shift($history); //Keep the history size bounded
			if($random_url) { if(sizeof($urls) < $max_urls) array_push($urls, $new_urls[rand(0,sizeof($new_urls)-1)]); }
			else { if(sizeof($urls) < $max_urls) array_push($urls, $new_url); }
			if($debug) { ///Debug Block
				$u++;
				$uspeed = round($u/(time()-$t+1), 2);
				$cspeed = round($c/(time()-$t+1), 2);
				$time = round((time()-$t)/60, 1);
				$buffered = sizeof($urls);
				$historied = sizeof($history);
				$memory = round(memory_get_usage()/1000000, 2);
				echo("+$u URLs; $c Downloaded; UpTime: $time mins; Buffered: $buffered URLs; History: $historied URLs; Speed: $uspeed URLs/s, $cspeed Downloads/s; Memory used: $memory/$memory_limit\n");
			}
		}
	}
	$new_urls = ''; //Memory cleanup
}

##EOF