# Doku Wiki
#
# @website     https://www.dokuwiki.org/
# @provide-api yes
#              (https://www.dokuwiki.org/devel:xmlrpc)
#
# @using-api   no
# @results     HTML
# @stable      yes
# @parse       (general)    url, title, content

from urllib import urlencode
from lxml.html import fromstring
from searx.engines.xpath import extract_text

# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False
language_support = False
number_of_results = 5

# search-url
# DokuWiki is OpenSearch compatible
base_url = 'http://localhost:8090'
search_url = '/?do=search'\
             '&{query}'
# TODO             '&startRecord={offset}'\
# TODO             '&maximumRecords={limit}'\


# do search-request
def request(query, params):
    params['url'] = base_url +\
        search_url.format(query=urlencode({'id': query}))

    return params
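
# Example (illustrative, not from the original file): request('wiki syntax', {})
# sets params['url'] to 'http://localhost:8090/?do=search&id=wiki+syntax',
# since urlencode({'id': query}) maps the whole query onto the single 'id'
# parameter that DokuWiki's search form expects.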
					
						
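# Sketch of the markup response() parses below (abridged; the structure is
# inferred from the XPath expressions, not copied from a live DokuWiki page):
#
#   <div class="search_quickresult">
#     <ul><li><a class="wikilink1" href="..." title="...">...</a></li></ul>
#   </div>
#   <dl class="search_results">
#     <dt><a class="wikilink1" href="..." title="...">...</a></dt>
#     <dd>...matching text snippet...</dd>
#   </dl>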
					
						
# get response from search-request
def response(resp):
    results = []

    doc = fromstring(resp.text)

    # parse results
    # Quickhits: the short list of matching page names shown above the
    # full-text hits
    for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
        except IndexError:
            continue

        if not res_url:
            continue

        title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title,
                        'content': "",
                        'url': base_url + res_url})

    # Search results: a <dl> in which each <dt> carries the page link and
    # the following <dd> carries the matching text snippet
    res_url = None
    title = None
    for r in doc.xpath('//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
                title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
            elif r.tag == "dd" and res_url:
                content = extract_text(r.xpath('.'))

                # append result
                results.append({'title': title,
                                'content': content,
                                'url': base_url + res_url})
        except IndexError:
            continue

    # return results
    return results
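

# Minimal, illustrative self-test (not part of the original engine): feeds a
# canned DokuWiki-style search page through request() and response().  The
# FakeResponse class and the HTML snippet are assumptions for demonstration
# only; running it requires searx to be importable.
if __name__ == '__main__':
    class FakeResponse(object):
        # mimics the single attribute that response() reads
        text = '''
        <div class="search_quickresult"><ul>
          <li><a class="wikilink1" href="/wiki:syntax" title="wiki:syntax">syntax</a></li>
        </ul></div>
        <dl class="search_results">
          <dt><a class="wikilink1" href="/wiki:welcome" title="wiki:welcome">welcome</a></dt>
          <dd>Welcome to your new DokuWiki ...</dd>
        </dl>
        '''

    print(request('wiki syntax', {})['url'])
    # -> http://localhost:8090/?do=search&id=wiki+syntax
    for result in response(FakeResponse()):
        print(result)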