Merge pull request #10 from dalf/master
bug fixes and dailymotion engine
This commit is contained in:
commit ff0bbd3566
@@ -79,3 +79,8 @@ suggestion_xpath = //div[@id="satat"]//a
 [youtube]
 engine = youtube
 categories = videos
+
+[dailymotion]
+engine = dailymotion
+categories = videos
+

searx/engines/dailymotion.py (new file, 32 lines)
@@ -0,0 +1,32 @@
+from urllib import urlencode
+from json import loads
+from cgi import escape
+
+categories = ['videos']
+localization = 'en'
+
+# see http://www.dailymotion.com/doc/api/obj-video.html
+search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
+
+def request(query, params):
+    global search_url
+    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': localization}))
+    return params
+
+
+def response(resp):
+    results = []
+    search_res = loads(resp.text)
+    if not 'list' in search_res:
+        return results
+    for res in search_res['list']:
+        title = res['title']
+        url = res['url']
+        if res['thumbnail_360_url']:
+            content = '<a href="{0}" title="{0}" ><img src="{1}" /></a><br />'.format(url, res['thumbnail_360_url'])
+        else:
+            content = ''
+        if res['description']:
+            content += escape(res['description'][:500])
+        results.append({'url': url, 'title': title, 'content': content})
+    return results
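For reviewers who want to exercise the new engine outside searx, here is a minimal smoke-test sketch (not part of this commit). It assumes the searx package is importable; FakeResponse and the sample payload are made up for illustration and only carry the fields that response() actually reads.

# Hypothetical smoke test, Python 2 to match the engine code above.
from collections import namedtuple
from json import dumps
from searx.engines import dailymotion   # assumes searx is on the Python path

# Build the outgoing request parameters the engine would use.
params = dailymotion.request('monty python', {})
print params['url']   # the Dailymotion API URL that would be fetched

# Feed response() a hand-built object exposing the one attribute it reads: .text
FakeResponse = namedtuple('FakeResponse', ['text'])
payload = {'list': [{'title': 'Example clip',                      # made-up sample data
                     'url': 'https://www.dailymotion.com/video/x123',
                     'thumbnail_360_url': 'https://example.org/x123.jpg',
                     'description': 'A sample description.'}]}
for result in dailymotion.response(FakeResponse(text=dumps(payload))):
    print result['title'], result['url']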
@@ -7,7 +7,7 @@ from urlparse import urljoin
 categories = ['images']
 
 url = 'https://secure.flickr.com/'
-search_url = url+'search/?q={query}'
+search_url = url+'search/?{query}'
 
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
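The flickr change is easy to misread, so here is a quick illustration (not part of the commit): urlencode() already emits the "q=" prefix, so the old template doubled the parameter name.

from urllib import urlencode   # Python 2, as used by the engine modules

query = urlencode({'q': 'blue bird'})                              # 'q=blue+bird'
print 'https://secure.flickr.com/search/?q={query}'.format(query=query)
# old template -> https://secure.flickr.com/search/?q=q=blue+bird  (parameter doubled)
print 'https://secure.flickr.com/search/?{query}'.format(query=query)
# new template -> https://secure.flickr.com/search/?q=blue+bird    (correct)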
@@ -28,7 +28,7 @@ def extract_url(xpath_results):
             url = xpath_results[0].attrib.get('href')
     else:
         url = xpath_results.attrib.get('href')
-    if not url.startswith('http://') or not url.startswith('https://'):
+    if not url.startswith('http://') and not url.startswith('https://'):
        url = 'http://'+url
     parsed_url = urlparse(url)
     if not parsed_url.netloc:
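The extract_url fix is a classic boolean slip: no URL starts with both "http://" and "https://", so the old "or" condition was always true and the code prepended "http://" even to URLs that already had a scheme. A short illustration (not part of the commit):

url = 'https://example.org/page'

old = not url.startswith('http://') or not url.startswith('https://')
new = not url.startswith('http://') and not url.startswith('https://')
print old   # True  -> old code would build 'http://https://example.org/page'
print new   # False -> URLs that already carry a scheme are left untouched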