iwla Commit Details

Date: 2014-11-18 20:18:53
Author: Grégory Soutadé
Branches: dev, master
Commit: 26688e4bf7c1bda7db98a0ca65fa09072f8b0113
Message: Initial commit

Changes:
A  iwla.py (full)
A  iwla_convert.pl (full)
A  robots.py (full)

File differences

iwla.py
#!/usr/bin/env python

import os
import re
import time
import glob
import imp

from robots import awstats_robots;

print '==> Start'

current_visit = {}

log_format = '$server_name:$server_port $remote_addr - $remote_user [$time_local] ' +\
    '"$request" $status $body_bytes_sent ' +\
    '"$http_referer" "$http_user_agent"';

log_format_extracted = re.sub(r'([^\$\w])', r'\\\g<1>', log_format);
log_format_extracted = re.sub(r'\$(\w+)', '(?P<\g<1>>.+)', log_format_extracted)
http_request_extracted = re.compile(r'(?P<http_method>\S+) (?P<http_uri>\S+) (?P<http_version>\S+)')

#09/Nov/2014:06:35:16 +0100
time_format = '%d/%b/%Y:%H:%M:%S +0100'

#print "Log format : " + log_format_extracted

log_re = re.compile(log_format_extracted)
uri_re = re.compile(r'(?P<extract_uri>[^\?]*)\?(?P<extract_parameters>.*)')
pages_extensions = ['/', 'html', 'xhtml', 'py', 'pl', 'rb', 'php']
viewed_http_codes = [200]

cur_time = None

print '==> Generating robot dictionary'

awstats_robots = map(lambda (x) : re.compile(x, re.IGNORECASE), awstats_robots)

def isPage(request):
    for e in pages_extensions:
        if request.endswith(e):
            return True
    return False

def appendHit(hit):
    super_hit = current_visit[hit['remote_addr']]
    super_hit['pages'].append(hit)
    super_hit['bandwith'] += int(hit['body_bytes_sent'])

    request = hit['extract_request']

    if 'extract_uri' in request.keys():
        uri = request['extract_uri']
    else:
        uri = request['http_uri']

    hit['is_page'] = isPage(uri)

    # Don't count redirect status
    if int(hit['status']) == 302: return

    if super_hit['robot'] or\
       not int(hit['status']) in viewed_http_codes:
        page_key = 'not_viewed_pages'
        hit_key = 'not_viewed_hits'
    else:
        page_key = 'viewed_pages'
        hit_key = 'viewed_hits'

    if hit['is_page']:
        super_hit[page_key] += 1
    else:
        super_hit[hit_key] += 1

def createGeneric(hit):
    super_hit = current_visit[hit['remote_addr']] = {}
    super_hit['viewed_pages'] = 0;
    super_hit['viewed_hits'] = 0;
    super_hit['not_viewed_pages'] = 0;
    super_hit['not_viewed_hits'] = 0;
    super_hit['bandwith'] = 0;
    super_hit['pages'] = [];
    return super_hit

def createUser(hit, robot):
    super_hit = createGeneric(hit)
    super_hit['robot'] = robot;
    appendHit(hit)

def isRobot(hit):
    for r in awstats_robots:
        if r.match(hit['http_user_agent']):
            return True
    return False

def decode_http_request(hit):
    if not 'request' in hit.keys(): return False

    groups = http_request_extracted.match(hit['request'])

    if groups:
        hit['extract_request'] = groups.groupdict()
        uri_groups = uri_re.match(hit['extract_request']['http_uri']);
        if uri_groups:
            hit['extract_request']['extract_uri'] = uri_groups.group('extract_uri')
            hit['extract_request']['extract_parameters'] = uri_groups.group('extract_parameters')
    else:
        print "Bad request extraction " + hit['request']
        return False

    referer_groups = uri_re.match(hit['http_referer']);
    if referer_groups:
        hit['extract_referer']['extract_uri'] = referer_groups.group('extract_uri')
        hit['extract_referer']['extract_parameters'] = referer_groups.group('extract_parameters')

    return True

def decode_time(hit):
    t = hit['time_local']

    hit['time_decoded'] = time.strptime(t, time_format)

def newHit(hit):
    global cur_time

    if not decode_http_request(hit): return

    for k in hit.keys():
        if hit[k] == '-': hit[k] = ''

    decode_time(hit)

    t = hit['time_decoded']

    current_visit['last_time'] = t

    if cur_time == None:
        cur_time = t
    else:
        if cur_time.tm_mday != t.tm_mday:
            return False

    remote_addr = hit['remote_addr']
    if remote_addr in current_visit.keys():
        appendHit(hit)
    else:
        createUser(hit, isRobot(hit))

    return True

print '==> Analysing log'

f = open("access.log")

for l in f:
    # print "line " + l;

    groups = log_re.match(l)

    if groups:
        if not newHit(groups.groupdict()):
            break
    else:
        print "No match " + l

f.close();

print '==> Call plugins'

plugins = glob.glob('./hooks_pre/*.py')
plugins.sort()

for p in plugins:
    print '\t%s' % (p)
    mod = imp.load_source('hook', p)
    mod.hook(current_visit)

for ip in current_visit.keys():
    hit = current_visit[ip]
    if hit['robot']: continue
    print "%s =>" % (ip)
    for k in hit.keys():
        if k != 'pages':
            print "\t%s : %s" % (k, current_visit[ip][k])
iwla_convert.pl
#!/usr/bin/perl

my $awstats_lib_root = '/usr/share/awstats/lib/';
my @awstats_libs = ('browsers.pm', 'browsers_phone.pm', 'mime.pm', 'referer_spam.pm', 'search_engines.pm', 'operating_systems.pm', 'robots.pm', 'worms.pm');

foreach $lib (@awstats_libs) {require $awstats_lib_root . $lib;}

open($FIC,">", "robots.py") or die $!;

print $FIC "awstats_robots = [";

$first = 0;
foreach $r (@RobotsSearchIDOrder_list1)
{
    $r =~ s/\'/\\\'/g;
    if ($first != 0)
    {
        print $FIC ", ";
    }
    else
    {
        $first = 1;
    }
    print $FIC "'.*$r.*'";
}

foreach $r (@RobotsSearchIDOrder_list2)
{
    $r =~ s/\'/\\\'/g;
    print $FIC ", '.*$r.*'";
}

print $FIC "]\n\n";

close($FIC);
robots.py
awstats_robots = ['.*appie.*', '.*architext.*', '.*jeeves.*', '.*bjaaland.*', '.*contentmatch.*', '.*ferret.*', '.*googlebot.*', '.*google\-sitemaps.*', '.*gulliver.*', '.*virus[_+ ]detector.*', '.*harvest.*', '.*htdig.*', '.*linkwalker.*', '.*lilina.*', '.*lycos[_+ ].*', '.*moget.*', '.*muscatferret.*', '.*myweb.*', '.*nomad.*', '.*scooter.*', '.*slurp.*', '.*^voyager\/.*', '.*weblayers.*', '.*antibot.*', '.*bruinbot.*', '.*digout4u.*', '.*echo!.*', '.*fast\-webcrawler.*', '.*ia_archiver\-web\.archive\.org.*', '.*ia_archiver.*', '.*jennybot.*', '.*mercator.*', '.*netcraft.*', '.*msnbot\-media.*', '.*msnbot.*', '.*petersnews.*', '.*relevantnoise\.com.*', '.*unlost_web_crawler.*', '.*voila.*', '.*webbase.*', '.*webcollage.*', '.*cfetch.*', '.*zyborg.*', '.*wisenutbot.*', '.*[^a]fish.*', '.*abcdatos.*', '.*acme\.spider.*', '.*ahoythehomepagefinder.*', '.*alkaline.*', '.*anthill.*', '.*arachnophilia.*', '.*arale.*', '.*araneo.*', '.*aretha.*', '.*ariadne.*', '.*powermarks.*', '.*arks.*', '.*aspider.*', '.*atn\.txt.*', '.*atomz.*', '.*auresys.*', '.*backrub.*', '.*bbot.*', '.*bigbrother.*', '.*blackwidow.*', '.*blindekuh.*', '.*bloodhound.*', '.*borg\-bot.*', '.*brightnet.*', '.*bspider.*', '.*cactvschemistryspider.*', '.*calif[^r].*', '.*cassandra.*', '.*cgireader.*', '.*checkbot.*', '.*christcrawler.*', '.*churl.*', '.*cienciaficcion.*', '.*collective.*', '.*combine.*', '.*conceptbot.*', '.*coolbot.*', '.*core.*', '.*cosmos.*', '.*cruiser.*', '.*cusco.*', '.*cyberspyder.*', '.*desertrealm.*', '.*deweb.*', '.*dienstspider.*', '.*digger.*', '.*diibot.*', '.*direct_hit.*', '.*dnabot.*', '.*download_express.*', '.*dragonbot.*', '.*dwcp.*', '.*e\-collector.*', '.*ebiness.*', '.*elfinbot.*', '.*emacs.*', '.*emcspider.*', '.*esther.*', '.*evliyacelebi.*', '.*fastcrawler.*', '.*feedcrawl.*', '.*fdse.*', '.*felix.*', '.*fetchrover.*', '.*fido.*', '.*finnish.*', '.*fireball.*', '.*fouineur.*', '.*francoroute.*', '.*freecrawl.*', '.*funnelweb.*', '.*gama.*', '.*gazz.*', '.*gcreep.*', '.*getbot.*', '.*geturl.*', '.*golem.*', '.*gougou.*', '.*grapnel.*', '.*griffon.*', '.*gromit.*', '.*gulperbot.*', '.*hambot.*', '.*havindex.*', '.*hometown.*', '.*htmlgobble.*', '.*hyperdecontextualizer.*', '.*iajabot.*', '.*iaskspider.*', '.*hl_ftien_spider.*', '.*sogou.*', '.*iconoclast.*', '.*ilse.*', '.*imagelock.*', '.*incywincy.*', '.*informant.*', '.*infoseek.*', '.*infoseeksidewinder.*', '.*infospider.*', '.*inspectorwww.*', '.*intelliagent.*', '.*irobot.*', '.*iron33.*', '.*israelisearch.*', '.*javabee.*', '.*jbot.*', '.*jcrawler.*', '.*jobo.*', '.*jobot.*', '.*joebot.*', '.*jubii.*', '.*jumpstation.*', '.*kapsi.*', '.*katipo.*', '.*kilroy.*', '.*ko[_+ ]yappo[_+ ]robot.*', '.*kummhttp.*', '.*labelgrabber\.txt.*', '.*larbin.*', '.*legs.*', '.*linkidator.*', '.*linkscan.*', '.*lockon.*', '.*logo_gif.*', '.*macworm.*', '.*magpie.*', '.*marvin.*', '.*mattie.*', '.*mediafox.*', '.*merzscope.*', '.*meshexplorer.*', '.*mindcrawler.*', '.*mnogosearch.*', '.*momspider.*', '.*monster.*', '.*motor.*', '.*muncher.*', '.*mwdsearch.*', '.*ndspider.*', '.*nederland\.zoek.*', '.*netcarta.*', '.*netmechanic.*', '.*netscoop.*', '.*newscan\-online.*', '.*nhse.*', '.*northstar.*', '.*nzexplorer.*', '.*objectssearch.*', '.*occam.*', '.*octopus.*', '.*openfind.*', '.*orb_search.*', '.*packrat.*', '.*pageboy.*', '.*parasite.*', '.*patric.*', '.*pegasus.*', '.*perignator.*', '.*perlcrawler.*', '.*phantom.*', '.*phpdig.*', '.*piltdownman.*', '.*pimptrain.*', '.*pioneer.*', '.*pitkow.*', '.*pjspider.*', '.*plumtreewebaccessor.*', 
'.*poppi.*', '.*portalb.*', '.*psbot.*', '.*python.*', '.*raven.*', '.*rbse.*', '.*resumerobot.*', '.*rhcs.*', '.*road_runner.*', '.*robbie.*', '.*robi.*', '.*robocrawl.*', '.*robofox.*', '.*robozilla.*', '.*roverbot.*', '.*rules.*', '.*safetynetrobot.*', '.*search\-info.*', '.*search_au.*', '.*searchprocess.*', '.*senrigan.*', '.*sgscout.*', '.*shaggy.*', '.*shaihulud.*', '.*sift.*', '.*simbot.*', '.*site\-valet.*', '.*sitetech.*', '.*skymob.*', '.*slcrawler.*', '.*smartspider.*', '.*snooper.*', '.*solbot.*', '.*speedy.*', '.*spider[_+ ]monkey.*', '.*spiderbot.*', '.*spiderline.*', '.*spiderman.*', '.*spiderview.*', '.*spry.*', '.*sqworm.*', '.*ssearcher.*', '.*suke.*', '.*sunrise.*', '.*suntek.*', '.*sven.*', '.*tach_bw.*', '.*tagyu_agent.*', '.*tailrank.*', '.*tarantula.*', '.*tarspider.*', '.*techbot.*', '.*templeton.*', '.*titan.*', '.*titin.*', '.*tkwww.*', '.*tlspider.*', '.*ucsd.*', '.*udmsearch.*', '.*universalfeedparser.*', '.*urlck.*', '.*valkyrie.*', '.*verticrawl.*', '.*victoria.*', '.*visionsearch.*', '.*voidbot.*', '.*vwbot.*', '.*w3index.*', '.*w3m2.*', '.*wallpaper.*', '.*wanderer.*', '.*wapspIRLider.*', '.*webbandit.*', '.*webcatcher.*', '.*webcopy.*', '.*webfetcher.*', '.*webfoot.*', '.*webinator.*', '.*weblinker.*', '.*webmirror.*', '.*webmoose.*', '.*webquest.*', '.*webreader.*', '.*webreaper.*', '.*websnarf.*', '.*webspider.*', '.*webvac.*', '.*webwalk.*', '.*webwalker.*', '.*webwatch.*', '.*whatuseek.*', '.*whowhere.*', '.*wired\-digital.*', '.*wmir.*', '.*wolp.*', '.*wombat.*', '.*wordpress.*', '.*worm.*', '.*woozweb.*', '.*wwwc.*', '.*wz101.*', '.*xget.*', '.*1\-more_scanner.*', '.*accoona\-ai\-agent.*', '.*activebookmark.*', '.*adamm_bot.*', '.*almaden.*', '.*aipbot.*', '.*aleadsoftbot.*', '.*alpha_search_agent.*', '.*allrati.*', '.*aport.*', '.*archive\.org_bot.*', '.*argus.*', '.*arianna\.libero\.it.*', '.*aspseek.*', '.*asterias.*', '.*awbot.*', '.*baiduspider.*', '.*becomebot.*', '.*bender.*', '.*betabot.*', '.*biglotron.*', '.*bittorrent_bot.*', '.*biz360[_+ ]spider.*', '.*blogbridge[_+ ]service.*', '.*bloglines.*', '.*blogpulse.*', '.*blogsearch.*', '.*blogshares.*', '.*blogslive.*', '.*blogssay.*', '.*bncf\.firenze\.sbn\.it\/raccolta\.txt.*', '.*bobby.*', '.*boitho\.com\-dc.*', '.*bookmark\-manager.*', '.*boris.*', '.*bumblebee.*', '.*candlelight[_+ ]favorites[_+ ]inspector.*', '.*cbn00glebot.*', '.*cerberian_drtrs.*', '.*cfnetwork.*', '.*cipinetbot.*', '.*checkweb_link_validator.*', '.*commons\-httpclient.*', '.*computer_and_automation_research_institute_crawler.*', '.*converamultimediacrawler.*', '.*converacrawler.*', '.*cscrawler.*', '.*cse_html_validator_lite_online.*', '.*cuasarbot.*', '.*cursor.*', '.*custo.*', '.*datafountains\/dmoz_downloader.*', '.*daviesbot.*', '.*daypopbot.*', '.*deepindex.*', '.*dipsie\.bot.*', '.*dnsgroup.*', '.*domainchecker.*', '.*domainsdb\.net.*', '.*dulance.*', '.*dumbot.*', '.*dumm\.de\-bot.*', '.*earthcom\.info.*', '.*easydl.*', '.*edgeio\-retriever.*', '.*ets_v.*', '.*exactseek.*', '.*extreme[_+ ]picture[_+ ]finder.*', '.*eventax.*', '.*everbeecrawler.*', '.*everest\-vulcan.*', '.*ezresult.*', '.*enteprise.*', '.*facebook.*', '.*fast_enterprise_crawler.*crawleradmin\.t\-info@telekom\.de.*', '.*fast_enterprise_crawler.*t\-info_bi_cluster_crawleradmin\.t\-info@telekom\.de.*', '.*matrix_s\.p\.a\._\-_fast_enterprise_crawler.*', '.*fast_enterprise_crawler.*', '.*fast\-search\-engine.*', '.*favicon.*', '.*favorg.*', '.*favorites_sweeper.*', '.*feedburner.*', '.*feedfetcher\-google.*', '.*feedflow.*', '.*feedster.*', 
'.*feedsky.*', '.*feedvalidator.*', '.*filmkamerabot.*', '.*findlinks.*', '.*findexa_crawler.*', '.*fooky\.com\/ScorpionBot.*', '.*g2crawler.*', '.*gaisbot.*', '.*geniebot.*', '.*gigabot.*', '.*girafabot.*', '.*global_fetch.*', '.*gnodspider.*', '.*goforit\.com.*', '.*goforitbot.*', '.*gonzo.*', '.*grub.*', '.*gpu_p2p_crawler.*', '.*henrythemiragorobot.*', '.*heritrix.*', '.*holmes.*', '.*hoowwwer.*', '.*hpprint.*', '.*htmlparser.*', '.*html[_+ ]link[_+ ]validator.*', '.*httrack.*', '.*hundesuche\.com\-bot.*', '.*ichiro.*', '.*iltrovatore\-setaccio.*', '.*infobot.*', '.*infociousbot.*', '.*infomine.*', '.*insurancobot.*', '.*internet[_+ ]ninja.*', '.*internetarchive.*', '.*internetseer.*', '.*internetsupervision.*', '.*irlbot.*', '.*isearch2006.*', '.*iupui_research_bot.*', '.*jrtwine[_+ ]software[_+ ]check[_+ ]favorites[_+ ]utility.*', '.*justview.*', '.*kalambot.*', '.*kamano\.de_newsfeedverzeichnis.*', '.*kazoombot.*', '.*kevin.*', '.*keyoshid.*', '.*kinjabot.*', '.*kinja\-imagebot.*', '.*knowitall.*', '.*knowledge\.com.*', '.*kouaa_krawler.*', '.*krugle.*', '.*ksibot.*', '.*kurzor.*', '.*lanshanbot.*', '.*letscrawl\.com.*', '.*libcrawl.*', '.*linkbot.*', '.*link_valet_online.*', '.*metager\-linkchecker.*', '.*linkchecker.*', '.*livejournal\.com.*', '.*lmspider.*', '.*lwp\-request.*', '.*lwp\-trivial.*', '.*magpierss.*', '.*mail\.ru.*', '.*mapoftheinternet\.com.*', '.*mediapartners\-google.*', '.*megite.*', '.*metaspinner.*', '.*microsoft[_+ ]url[_+ ]control.*', '.*mini\-reptile.*', '.*minirank.*', '.*missigua_locator.*', '.*misterbot.*', '.*miva.*', '.*mizzu_labs.*', '.*mj12bot.*', '.*mojeekbot.*', '.*msiecrawler.*', '.*ms_search_4\.0_robot.*', '.*msrabot.*', '.*msrbot.*', '.*mt::telegraph::agent.*', '.*nagios.*', '.*nasa_search.*', '.*mydoyouhike.*', '.*netluchs.*', '.*netsprint.*', '.*newsgatoronline.*', '.*nicebot.*', '.*nimblecrawler.*', '.*noxtrumbot.*', '.*npbot.*', '.*nutchcvs.*', '.*nutchosu\-vlib.*', '.*nutch.*', '.*ocelli.*', '.*octora_beta_bot.*', '.*omniexplorer[_+ ]bot.*', '.*onet\.pl[_+ ]sa.*', '.*onfolio.*', '.*opentaggerbot.*', '.*openwebspider.*', '.*oracle_ultra_search.*', '.*orbiter.*', '.*yodaobot.*', '.*qihoobot.*', '.*passwordmaker\.org.*', '.*pear_http_request_class.*', '.*peerbot.*', '.*perman.*', '.*php[_+ ]version[_+ ]tracker.*', '.*pictureofinternet.*', '.*ping\.blo\.gs.*', '.*plinki.*', '.*pluckfeedcrawler.*', '.*pogodak.*', '.*pompos.*', '.*popdexter.*', '.*port_huron_labs.*', '.*postfavorites.*', '.*projectwf\-java\-test\-crawler.*', '.*proodlebot.*', '.*pyquery.*', '.*rambler.*', '.*redalert.*', '.*rojo.*', '.*rssimagesbot.*', '.*ruffle.*', '.*rufusbot.*', '.*sandcrawler.*', '.*sbider.*', '.*schizozilla.*', '.*scumbot.*', '.*searchguild[_+ ]dmoz[_+ ]experiment.*', '.*seekbot.*', '.*sensis_web_crawler.*', '.*seznambot.*', '.*shim\-crawler.*', '.*shoutcast.*', '.*slysearch.*', '.*snap\.com_beta_crawler.*', '.*sohu\-search.*', '.*sohu.*', '.*snappy.*', '.*sphere_scout.*', '.*spip.*', '.*sproose_crawler.*', '.*steeler.*', '.*steroid__download.*', '.*suchfin\-bot.*', '.*superbot.*', '.*surveybot.*', '.*susie.*', '.*syndic8.*', '.*syndicapi.*', '.*synoobot.*', '.*tcl_http_client_package.*', '.*technoratibot.*', '.*teragramcrawlersurf.*', '.*test_crawler.*', '.*testbot.*', '.*t\-h\-u\-n\-d\-e\-r\-s\-t\-o\-n\-e.*', '.*topicblogs.*', '.*turnitinbot.*', '.*turtlescanner.*', '.*turtle.*', '.*tutorgigbot.*', '.*twiceler.*', '.*ubicrawler.*', '.*ultraseek.*', '.*unchaos_bot_hybrid_web_search_engine.*', '.*unido\-bot.*', '.*updated.*', '.*ustc\-semantic\-group.*', 
'.*vagabondo\-wap.*', '.*vagabondo.*', '.*vermut.*', '.*versus_crawler_from_eda\.baykan@epfl\.ch.*', '.*vespa_crawler.*', '.*vortex.*', '.*vse\/.*', '.*w3c\-checklink.*', '.*w3c[_+ ]css[_+ ]validator[_+ ]jfouffa.*', '.*w3c_validator.*', '.*watchmouse.*', '.*wavefire.*', '.*webclipping\.com.*', '.*webcompass.*', '.*webcrawl\.net.*', '.*web_downloader.*', '.*webdup.*', '.*webfilter.*', '.*webindexer.*', '.*webminer.*', '.*website[_+ ]monitoring[_+ ]bot.*', '.*webvulncrawl.*', '.*wells_search.*', '.*wonderer.*', '.*wume_crawler.*', '.*wwweasel.*', '.*xenu\'s_link_sleuth.*', '.*xenu_link_sleuth.*', '.*xirq.*', '.*y!j.*', '.*yacy.*', '.*yahoo\-blogs.*', '.*yahoo\-verticalcrawler.*', '.*yahoofeedseeker.*', '.*yahooseeker\-testing.*', '.*yahooseeker.*', '.*yahoo\-mmcrawler.*', '.*yahoo!_mindset.*', '.*yandex.*', '.*flexum.*', '.*yanga.*', '.*yooglifetchagent.*', '.*z\-add_link_checker.*', '.*zealbot.*', '.*zhuaxia.*', '.*zspider.*', '.*zeus.*', '.*ng\/1\..*', '.*ng\/2\..*', '.*exabot.*', '.*wget.*', '.*libwww.*', '.*java\/[0-9].*']
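
For context, iwla.py compiles each entry of this generated list with re.IGNORECASE and matches it against the $http_user_agent field to flag robot visits. A minimal sketch of that check, using only a few entries from the list and made-up user-agent strings:

    import re

    # A small subset of the generated awstats_robots entries,
    # compiled the same way iwla.py does.
    awstats_robots = ['.*googlebot.*', '.*slurp.*', '.*wget.*']
    robot_res = [re.compile(r, re.IGNORECASE) for r in awstats_robots]

    def is_robot(user_agent):
        # True as soon as any robot pattern matches the user agent.
        return any(r.match(user_agent) for r in robot_res)

    print(is_robot('Mozilla/5.0 (compatible; Googlebot/2.1)'))   # True
    print(is_robot('Mozilla/5.0 (X11; Linux x86_64; rv:33.0)'))  # False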
