I. Environment Preparation
Windows 7 64-bit
JDK 1.8.0_65
solr-5.3.1
apache-tomcat-8.0.28
word-1.3.jar
II. Environment Installation
1. Installing the word segmenter
1. Download word-1.3.jar
Download URL: http://search.maven.org/remotecontent?filepath=org/apdplat/word/1.3/word-1.3.jar
Copy word-1.3.jar into the D:\apache-tomcat-8.0.28\webapps\solr\WEB-INF\lib folder, as shown in the figure:
2. Configure schema.xml to use the word tokenizer
In D:\apache-tomcat-8.0.28\webapps\solr\solr_home\core1\conf\schema.xml, replace every occurrence of
<tokenizer class="solr.WhitespaceTokenizerFactory"/> and
<tokenizer class="solr.StandardTokenizerFactory"/> with
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
and remove all <filter> elements (i.e. every line containing <filter>). A helper that automates this edit is sketched below.
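If your schema.xml contains many field types, the substitution can be applied mechanically with a small throwaway helper. This is only a sketch in plain JDK code: it assumes each <tokenizer> and <filter> element sits on its own line (as in the file below) and that the path matches your installation; back up schema.xml before running it.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.stream.Collectors;

public class PatchSchema {
    public static void main(String[] args) throws Exception {
        // Adjust to your installation; this path matches the layout used in this tutorial.
        Path schema = Paths.get("D:/apache-tomcat-8.0.28/webapps/solr/solr_home/core1/conf/schema.xml");
        Path backup = Paths.get(schema.toString() + ".bak");
        Files.copy(schema, backup, StandardCopyOption.REPLACE_EXISTING);

        List<String> lines = Files.readAllLines(schema, StandardCharsets.UTF_8);
        List<String> patched = lines.stream()
                // drop every <filter .../> line, per the instruction above
                .filter(line -> !line.trim().startsWith("<filter"))
                // swap the stock tokenizers for the word tokenizer factory
                .map(line -> line
                        .replace("solr.WhitespaceTokenizerFactory", "org.apdplat.word.solr.ChineseWordTokenizerFactory")
                        .replace("solr.StandardTokenizerFactory", "org.apdplat.word.solr.ChineseWordTokenizerFactory"))
                .collect(Collectors.toList());
        Files.write(schema, patched, StandardCharsets.UTF_8);
        System.out.println("Patched " + schema + ", backup written to " + backup);
    }
}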
The modified schema.xml file:
<?xml version="1.0" encoding="UTF-8" ?>
<schema name="example" version="1.5">
<field name="_version_" type="long" indexed="true" stored="true"/>
<field name="_root_" type="string" indexed="true" stored="false"/>
<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true" />
<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_ls" type="long" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_t" type="text_general" indexed="true" stored="true"/>
<dynamicField name="*_txt" type="text_general" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_en" type="text_en" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_bs" type="boolean" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_fs" type="float" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_ds" type="double" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false" />
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_p" type="location" indexed="true" stored="true"/>
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="*_c" type="currency" indexed="true" stored="true"/>
<dynamicField name="ignored_*" type="ignored" multiValued="true"/>
<dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="random_*" type="random" />
<uniqueKey>id</uniqueKey>
<fieldType name="string" class="solr.StrField" sortMissingLast="true" />
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0"/>
<fieldType name="binary" class="solr.BinaryField"/>
<fieldType name="random" class="solr.RandomSortField" indexed="true" />
<fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
<analyzer>
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer>
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="text_general_rev" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="alphaOnlySort" class="solr.TextField" sortMissingLast="true" omitNorms="true">
<analyzer>
<tokenizer class="solr.KeywordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.KeywordTokenizerFactory"/>
</analyzer>
</fieldType>
<fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
<fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
<fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
<fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType"
geo="true" distErrPct="0.025" maxDistErr="0.001" distanceUnits="kilometers" />
<fieldType name="bbox" class="solr.BBoxField"
geo="true" distanceUnits="kilometers" numberType="_bbox_coord" />
<fieldType name="_bbox_coord" class="solr.TrieDoubleField" precisionStep="8" docValues="true" stored="false"/>
<fieldType name="currency" class="solr.CurrencyField" precisionStep="8" defaultCurrency="USD" currencyConfig="currency.xml" />
</schema>
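With this schema, every *_t and *_txt dynamic field is analyzed by the word tokenizer via the text_general type. Once the Solr service has been restarted (step 4 below), a short SolrJ test can confirm that indexing and querying Chinese text works end to end. This is a minimal sketch; it assumes solr-solrj 5.3.1 and its dependencies are on the classpath, and the field name title_txt is only an example:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class IndexTest {
    public static void main(String[] args) throws Exception {
        // core1 as configured above; adjust host and port to your Tomcat setup
        HttpSolrClient client = new HttpSolrClient("http://localhost:8080/solr/core1");

        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "test-1");
        // *_txt maps to the text_general type, which now uses the word tokenizer
        doc.addField("title_txt", "塔里木盆地的地质结构");
        client.add(doc);
        client.commit();

        // A query for a segmented term should now match the test document
        QueryResponse response = client.query(new SolrQuery("title_txt:地质"));
        System.out.println("hits: " + response.getResults().getNumFound());
        client.close();
    }
}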
3. Using a specific segmentation algorithm
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory" segAlgorithm="ReverseMinimumMatching"/>
The supported segAlgorithm values are:
Forward maximum matching: MaximumMatching
Reverse maximum matching: ReverseMaximumMatching
Forward minimum matching: MinimumMatching
Reverse minimum matching: ReverseMinimumMatching
Bidirectional maximum matching: BidirectionalMaximumMatching
Bidirectional minimum matching: BidirectionalMinimumMatching
Bidirectional maximum-minimum matching: BidirectionalMaximumMinimumMatching
Full segmentation: FullSegmentation
Minimal word count: MinimalWordCount
Maximum n-gram score: MaxNgramScore
If segAlgorithm is not specified, the default is bidirectional maximum matching (BidirectionalMaximumMatching). A quick way to compare the algorithms outside Solr is sketched after this list.
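To compare these algorithms outside Solr, the word library can be driven directly from Java. The sketch below is based on the Segmentation/SegmentationFactory API that the tokenizer factory builds on; treat the exact class and method names as assumptions and verify them against the word-1.3 sources, with word-1.3.jar and its dependencies on the classpath:

import java.util.List;
import org.apdplat.word.segmentation.Segmentation;
import org.apdplat.word.segmentation.SegmentationAlgorithm;
import org.apdplat.word.segmentation.SegmentationFactory;
import org.apdplat.word.segmentation.Word;

public class CompareSegAlgorithms {
    public static void main(String[] args) {
        String text = "塔里木盆地的地质结构";
        // Run the sample text through every algorithm listed above and print the result
        for (SegmentationAlgorithm algorithm : SegmentationAlgorithm.values()) {
            Segmentation segmentation = SegmentationFactory.getSegmentation(algorithm);
            List<Word> words = segmentation.seg(text);
            System.out.println(algorithm.name() + " : " + words);
        }
    }
}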
4. Restart the Solr service and verify the tokenizer
Open the Solr Admin home page and choose "Core Selector" -> "core1" -> "Analysis"; then, in the field/type dropdown on the right (which shows "_root_" by default), select "text_general", as shown in the figure:
Enter the text to segment, e.g. "塔里木盆地的地质结构", into "Field Value (Index)" and click the "Analyse Values" button; the segmentation result is displayed, as shown in the figure:
Enter "地质" into "Field Value (Query)" and click "Analyse Values" to display the result, as shown in the figure:
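The same check can be scripted against Solr's field-analysis request handler instead of the Admin UI, which is handy for repeating the test after configuration changes. A minimal sketch, assuming Solr is reachable at http://localhost:8080/solr, the core is named core1, and the /analysis/field handler is available (the Analysis screen itself relies on it):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class AnalysisCheck {
    public static void main(String[] args) throws Exception {
        String text = URLEncoder.encode("塔里木盆地的地质结构", "UTF-8");
        // Ask the field-analysis handler how the text_general type tokenizes the value
        URL url = new URL("http://localhost:8080/solr/core1/analysis/field"
                + "?analysis.fieldtype=text_general"
                + "&analysis.fieldvalue=" + text
                + "&wt=json&indent=true");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // raw JSON; the text_general section lists the emitted tokens
            }
        }
    }
}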
2. Configuring custom dictionary and configuration file paths for word
1. Open word-1.3.jar with an archive tool, as shown in the figure:
2. Extract all of the .txt files plus word.conf into one directory, e.g. D:\word-dic, then rename word.conf to word.local.conf (a scripted alternative is sketched below), as shown in the figure:
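The extraction can also be scripted instead of using an archive tool. A minimal sketch in plain JDK code; the jar location and the target directory D:\word-dic are assumptions to adjust to your setup:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Enumeration;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;

public class ExtractWordDic {
    public static void main(String[] args) throws Exception {
        Path targetDir = Paths.get("D:/word-dic");           // destination directory
        Files.createDirectories(targetDir);
        try (JarFile jar = new JarFile("word-1.3.jar")) {     // path to the downloaded jar
            Enumeration<JarEntry> entries = jar.entries();
            while (entries.hasMoreElements()) {
                JarEntry entry = entries.nextElement();
                String name = entry.getName();
                if (entry.isDirectory()) {
                    continue;
                }
                // copy every dictionary file, and save word.conf under the name word.local.conf
                if (name.endsWith(".txt") || name.equals("word.conf")) {
                    String fileName = name.equals("word.conf")
                            ? "word.local.conf"
                            : Paths.get(name).getFileName().toString();
                    try (InputStream in = jar.getInputStream(entry)) {
                        Files.copy(in, targetDir.resolve(fileName), StandardCopyOption.REPLACE_EXISTING);
                    }
                }
            }
        }
        System.out.println("Extracted dictionaries to " + targetDir);
    }
}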
Edit the configuration entries in word.local.conf: replace every occurrence of "http://localhost:8080/word_web/resources" with your own directory path, e.g. "D:/word-dic". Lines beginning with "#" are comments and can be left as they are.
The resulting word.local.conf file:
# Whether to enable automatic change detection, e.g. for user-defined dictionaries and stop-word dictionaries
auto.detect=true
# Dictionary implementation class: a prefix trie indexed by the first character of each word
#dic.class=org.apdplat.word.dictionary.impl.DictionaryTrie
# Size of the prefix trie's first-character index; too small a value increases collisions and degrades lookup performance
dictionary.trie.index.size=24000
# Double-array prefix trie: slightly faster and slightly lower memory usage,
# but limited: it supports neither adding/removing single entries dynamically nor batch changes;
# the only supported way to change the dictionary at runtime is clear() followed by addAll()
dic.class=org.apdplat.word.dictionary.impl.DoubleArrayDictionaryTrie
# Pre-allocated size of the double-array prefix trie; grown in 10% increments if it turns out to be too small
double.array.dictionary.trie.size=2600000
# Dictionaries; separate multiple entries with commas
# e.g.: dic.path=classpath:dic.txt,classpath:custom_dic,d:/dic_more.txt,d:/DIC,D:/DIC2
# Changes are detected automatically for classpath files and folders as well as absolute and relative paths outside the classpath
# HTTP resource: dic.path=http://localhost:8080/word_web/resources/dic.txt
dic.path=D:/word-dic/dic.txt,D:/word-dic/protwords.txt
# Whether to use multiple CPU cores to speed up segmentation
parallel.seg=true
# Part-of-speech tagging data: part.of.speech.dic.path=http://localhost:8080/word_web/resources/part_of_speech_dic.txt
part.of.speech.dic.path=D:/word-dic/part_of_speech_dic.txt
# Part-of-speech description data: part.of.speech.des.path=http://localhost:8080/word_web/resources/part_of_speech_des.txt
part.of.speech.des.path=D:/word-dic/part_of_speech_des.txt
# Bigram model path
# HTTP resource: bigram.path=http://localhost:8080/word_web/resources/bigram.txt
bigram.path=D:/word-dic/bigram.txt
bigram.double.array.trie.size=5300000
# Trigram model path
# HTTP resource: trigram.path=http://localhost:8080/word_web/resources/trigram.txt
trigram.path=D:/word-dic/trigram.txt
trigram.double.array.trie.size=9800000
# Whether to enable an n-gram model, and which one
# Possible values: no (disabled), bigram, trigram
# If no n-gram model is enabled,
# bidirectional maximum matching and bidirectional maximum-minimum matching degrade to reverse maximum matching,
# and bidirectional minimum matching degrades to reverse minimum matching
ngram=bigram
# Stop-word dictionaries; separate multiple entries with commas
# e.g.: stopwords.path=classpath:stopwords.txt,classpath:custom_stopwords_dic,d:/stopwords_more.txt,d:/STOPWORDS,d:/STOPWORDS2
# Changes are detected automatically for classpath files and folders as well as absolute and relative paths outside the classpath
# HTTP resource: stopwords.path=http://localhost:8080/word_web/resources/stopwords.txt
stopwords.path=D:/word-dic/stopwords.txt
# Punctuation characters used to split text (to speed up segmentation); single characters only
# HTTP resource: punctuation.path=http://localhost:8080/word_web/resources/punctuation.txt
punctuation.path=D:/word-dic/punctuation.txt
# Maximum length of the text chunk processed at a time during segmentation
intercept.length=16
# Chinese surnames, used for person-name recognition
# HTTP resource: surname.path=http://localhost:8080/word_web/resources/surname.txt
surname.path=D:/word-dic/surname.txt
# Quantifiers
# HTTP resource: quantifier.path=http://localhost:8080/word_web/resources/quantifier.txt
quantifier.path=D:/word-dic/quantifier.txt
# Whether to enable automatic person-name recognition
person.name.recognize=true
# Whether to keep whitespace characters
keep.whitespace=false
# Whether to keep punctuation; punctuation characters are defined in punctuation.txt
keep.punctuation=false
# Maximum number of words that may be merged into one during refinement
word.refine.combine.max.length=3
# Configuration file for fine-tuning (refining) segmentation results
word.refine.path=D:/word-dic/word_refine.txt
# Synonym dictionary
word.synonym.path=D:/word-dic/word_synonym.txt
# Antonym dictionary
word.antonym.path=D:/word-dic/word_antonym.txt
# Whether the lucene, solr, elasticsearch, luke, etc. plugins emit extra taggings
tagging.pinyin.full=false
tagging.pinyin.acronym=false
tagging.synonym=false
tagging.antonym=false
# Whether to enable the recognition tool for identifying things such as English words, numbers, and times
recognition.tool.enabled=true
# If you want to know exactly which words the segmenter has loaded into its dictionary,
# specify a file path in dic.dump.path;
# while loading the dictionary, the segmenter will also write its contents to that file.
# A relative or absolute path may be used, e.g.:
#dic.dump.path=dic.dump.txt
#dic.dump.path=/Users/ysc/dic.dump.txt
dic.dump.path=
# Redis service, used for real-time detection of HTTP resource changes
# Redis host
redis.host=localhost
# Redis port
redis.port=6379
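Before restarting Solr it is worth confirming that every local path referenced in word.local.conf actually exists, since a typo here usually only surfaces later as a failed core load. The following standalone check is not part of word or Solr, just a small helper over the file shown above:

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CheckWordConf {
    public static void main(String[] args) throws Exception {
        Path conf = Paths.get("D:/word-dic/word.local.conf");
        for (String line : Files.readAllLines(conf, StandardCharsets.UTF_8)) {
            line = line.trim();
            // skip comments, blank lines and lines without a key=value pair
            if (line.isEmpty() || line.startsWith("#") || !line.contains("=")) {
                continue;
            }
            String value = line.substring(line.indexOf('=') + 1).trim();
            // only validate values that look like local file paths (drive letter or absolute unix path)
            if (value.matches("(?i)^[a-z]:[/\\\\].*") || value.startsWith("/")) {
                for (String p : value.split(",")) {
                    if (!Files.exists(Paths.get(p.trim()))) {
                        System.out.println("MISSING: " + p.trim() + "  (from: " + line + ")");
                    }
                }
            }
        }
        System.out.println("Check finished.");
    }
}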
3. Specify the configuration file path in schema.xml
In D:\apache-tomcat-8.0.28\webapps\solr\solr_home\core1\conf\schema.xml, find every <tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory"/> tag and add the attribute conf="D:/word-dic/word.local.conf", as follows:
<tokenizer class="org.apdplat.word.solr.ChineseWordTokenizerFactory" conf="D:/word-dic/word.local.conf"/>
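For the conf attribute (and any later dictionary changes) to take effect, restart Tomcat or reload the core. A minimal sketch of the reload call against Solr's CoreAdmin API, assuming the same host, port, and core name as above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ReloadCore {
    public static void main(String[] args) throws Exception {
        // CoreAdmin RELOAD re-reads schema.xml and therefore the tokenizer configuration
        URL url = new URL("http://localhost:8080/solr/admin/cores?action=RELOAD&core=core1&wt=json");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // a status of 0 in the response means the reload succeeded
            }
        }
    }
}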