| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | from lxml import html | 
					
						
							|  |  |  | from searx.engines.xpath import extract_text | 
					
						
							| 
									
										
										
										
											2017-01-15 15:29:01 +01:00
										 |  |  | from searx.utils import get_torrent_size | 
					
						
							| 
									
										
										
										
											2016-11-30 18:43:03 +01:00
										 |  |  | from searx.url_utils import quote, urljoin | 
					
						
							| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | 
 | 
					
						
# Engine configuration for the 1337x torrent search engine.

# Base URL of the site; relative result links are resolved against it.
url = 'https://1337x.to/'
# Search endpoint: {search_term} is the URL-quoted query,
# {pageno} the 1-based result page number.
search_url = url + 'search/{search_term}/{pageno}/'
# Category under which this engine is listed in the searx preferences.
categories = ['videos']
# The engine supports pagination (see the {pageno} placeholder above).
paging = True
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 14:50:29 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | def request(query, params): | 
					
						
							|  |  |  |     params['url'] = search_url.format(search_term=quote(query), pageno=params['pageno']) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     return params | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 14:50:29 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | def response(resp): | 
					
						
							|  |  |  |     results = [] | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     dom = html.fromstring(resp.text) | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     for result in dom.xpath('//table[contains(@class, "table-list")]/tbody//tr'): | 
					
						
							|  |  |  |         href = urljoin(url, result.xpath('./td[contains(@class, "name")]/a[2]/@href')[0]) | 
					
						
							|  |  |  |         title = extract_text(result.xpath('./td[contains(@class, "name")]/a[2]')) | 
					
						
							| 
									
										
										
										
											2017-01-15 15:29:01 +01:00
										 |  |  |         seed = extract_text(result.xpath('.//td[contains(@class, "seeds")]')) | 
					
						
							|  |  |  |         leech = extract_text(result.xpath('.//td[contains(@class, "leeches")]')) | 
					
						
							|  |  |  |         filesize_info = extract_text(result.xpath('.//td[contains(@class, "size")]/text()')) | 
					
						
							|  |  |  |         filesize, filesize_multiplier = filesize_info.split() | 
					
						
							|  |  |  |         filesize = get_torrent_size(filesize, filesize_multiplier) | 
					
						
							| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  |         results.append({'url': href, | 
					
						
							|  |  |  |                         'title': title, | 
					
						
							| 
									
										
										
										
											2017-01-15 15:29:01 +01:00
										 |  |  |                         'seed': seed, | 
					
						
							|  |  |  |                         'leech': leech, | 
					
						
							|  |  |  |                         'filesize': filesize, | 
					
						
							|  |  |  |                         'template': 'torrent.html'}) | 
					
						
							| 
									
										
										
										
											2017-01-15 14:24:19 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  |     return results |