1. Install the JDK
sudo apt-get install openjdk-7-jdk
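Verify the install:
java -version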
2. Download and unpack Solr
mkdir -p ~/tmp/solr
cd ~/tmp/solr
wget http://mirror.lividpenguin.com/pub/apache/lucene/solr/3.6.0/apache-solr-3.6.0.tgz
tar -xzvf apache-solr-3.6.0.tgz
Solr ships with Jetty by default. To try it, start the server from the example directory (shut it down with Ctrl-C when you are done):
cd apache-solr-3.6.0/example
java -jar start.jar
Check that the admin page loads at http://localhost:8983/solr
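You can also hit the ping handler from the command line (assuming the stock example config, which includes /admin/ping):
curl "http://localhost:8983/solr/admin/ping"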
3. Download and unpack Nutch
mkdir -p ~/tmp/nutch
cd ~/tmp/nutch
wget http://mirror.rmg.io/apache/nutch/1.5/apache-nutch-1.5-bin.tar.gz
tar -xzvf apache-nutch-1.5-bin.tar.gz
4. Configure Nutch
cd ~/tmp/nutch/apache-nutch-1.5
chmod +x bin/nutch
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-i386
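To make JAVA_HOME survive new shells, you can append it to ~/.bashrc (path taken from the i386 package above; adjust to the amd64 path on 64-bit systems):
echo 'export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-i386' >> ~/.bashrc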
Add the following inside the <configuration> element of conf/nutch-site.xml:
<property>
  <name>http.agent.name</name>
  <value>My Nutch Spider</value>
</property>
mkdir -p urls
nano urls/seed.txt
Add the URLs you want to crawl, one per line, for example:
http://nutch.apache.org/
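Equivalently, you can seed the file non-interactively:
cat > urls/seed.txt <<EOF
http://nutch.apache.org/
EOF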
Edit conf/regex-urlfilter.txt and replace the catch-all rule at the bottom:
# accept anything else
+.
with a regular expression matching the domain you wish to crawl. For example, if you wished to limit the crawl to the nutch.apache.org domain, the line should read:
+^http://([a-z0-9]*\.)*nutch.apache.org/
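To sanity-check the filter before crawling, Nutch includes a URL filter checker class (my understanding of the class name and flag in the 1.5 distribution); it reads URLs from stdin and prints + for accepted and - for rejected:
bin/nutch org.apache.nutch.net.URLFilterChecker -allCombined
Type a URL such as http://nutch.apache.org/ and press Enter; exit with Ctrl-D.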
5. Configure Solr
cd ~/tmp/solr/apache-solr-3.6.0/example/solr/conf
In schema.xml, add the following field type (inside the <types> section) and fields (inside the <fields> section):
<fieldType name="text" class="solr.TextField" positionIncrementGap="100">
  <analyzer>
    <tokenizer class="solr.WhitespaceTokenizerFactory"/>
    <filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt"/>
    <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1"
            generateNumberParts="1" catenateWords="1" catenateNumbers="1"
            catenateAll="0" splitOnCaseChange="1"/>
    <filter class="solr.LowerCaseFilterFactory"/>
    <filter class="solr.EnglishPorterFilterFactory" protected="protwords.txt"/>
    <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
  </analyzer>
</fieldType>
<field name="digest" type="text" stored="true" indexed="true"/>
<field name="boost" type="text" stored="true" indexed="true"/>
<field name="segment" type="text" stored="true" indexed="true"/>
<field name="host" type="text" stored="true" indexed="true"/>
<field name="site" type="text" stored="true" indexed="true"/>
<field name="content" type="text" stored="true" indexed="true"/>
<field name="tstamp" type="text" stored="true" indexed="false"/>
<field name="url" type="string" stored="true" indexed="true"/>
<field name="anchor" type="text" stored="true" indexed="false" multiValued="true"/>
Change <uniqueKey>id</uniqueKey> to
<uniqueKey>url</uniqueKey>
In solrconfig.xml, add the following request handler:
<requestHandler name="/nutch" class="solr.SearchHandler">
  <lst name="defaults">
    <str name="defType">dismax</str>
    <str name="echoParams">explicit</str>
    <float name="tie">0.01</float>
    <str name="qf">content^0.5 anchor^1.0 title^1.2</str>
    <str name="pf">content^0.5 anchor^1.5 title^1.2 site^1.5</str>
    <str name="fl">url</str>
    <int name="ps">100</int>
    <bool name="hl">true</bool>
    <str name="q.alt">*:*</str>
    <str name="hl.fl">title url content</str>
    <str name="f.title.hl.fragsize">0</str>
    <str name="f.title.hl.alternateField">title</str>
    <str name="f.url.hl.fragsize">0</str>
    <str name="f.url.hl.alternateField">url</str>
    <str name="f.content.hl.fragmenter">regex</str>
  </lst>
</requestHandler>
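Restart Solr so the schema and solrconfig changes take effect:
cd ~/tmp/solr/apache-solr-3.6.0/example
java -jar start.jar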
6. Run the Nutch crawler and index into Solr (make sure Solr is running)
From the Nutch root directory (~/tmp/nutch/apache-nutch-1.5):
bin/nutch crawl urls -solr http://localhost:8983/solr/ -depth 3 -topN 5
Check the indexed documents at http://localhost:8983/solr
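For example, you can query the /nutch handler defined above from the command line (the query term is just an example):
curl "http://localhost:8983/solr/nutch?q=nutch"
or list a few documents through the default handler:
curl "http://localhost:8983/solr/select?q=*:*&rows=5"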
You can ignore the following error in the Nutch console output; it comes from the final SolrDeleteDuplicates step, after the documents have already been indexed:
SolrIndexer: starting at 2012-09-14 10:37:49
Indexing 11 documents
SolrIndexer: finished at 2012-09-14 10:38:36, elapsed: 00:00:46
SolrDeleteDuplicates: starting at 2012-09-14 10:38:36
SolrDeleteDuplicates: Solr url: http://localhost:8983/solr/
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1265)
at org.apache.nutch.indexer.solr.SolrDeleteDuplicates.dedup(SolrDeleteDuplicates.java:373)
at org.apache.nutch.indexer.solr.SolrDeleteDuplicates.dedup(SolrDeleteDuplicates.java:353)
at org.apache.nutch.crawl.Crawl.run(Crawl.java:153)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
at org.apache.nutch.crawl.Crawl.main(Crawl.java:55)
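If you would rather avoid the dedup failure entirely, one option is to run the crawl phases individually and skip deduplication. A sketch using the standard Nutch 1.5 commands (paths are examples; repeat the generate/fetch/parse/updatedb cycle once per depth level):
bin/nutch inject crawl/crawldb urls
bin/nutch generate crawl/crawldb crawl/segments -topN 5
SEGMENT=$(ls -d crawl/segments/* | tail -1)
bin/nutch fetch $SEGMENT
bin/nutch parse $SEGMENT
bin/nutch updatedb crawl/crawldb $SEGMENT
bin/nutch invertlinks crawl/linkdb -dir crawl/segments
bin/nutch solrindex http://localhost:8983/solr/ crawl/crawldb -linkdb crawl/linkdb crawl/segments/*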