diff --git a/.gitignore b/.gitignore
index 4cc20423c..76ae1ca2c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,5 @@
env
engines.cfg
+
+*.pyc
+*/*.pyc
\ No newline at end of file
diff --git a/searx/engines/filecrop.py b/searx/engines/filecrop.py
new file mode 100644
index 000000000..a1a933db9
--- /dev/null
+++ b/searx/engines/filecrop.py
@@ -0,0 +1,77 @@
+from urllib import urlencode
+from HTMLParser import HTMLParser
+
+url = 'http://www.filecrop.com/'
+search_url = url + 'search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'
+
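+# parses FileCrop's HTML search results with a small HTMLParser subclass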
+class FilecropResultParser(HTMLParser):
+ def __init__(self):
+ HTMLParser.__init__(self)
+ self.__start_processing = False
+
+ self.results = []
+ self.result = {}
+
+ self.tr_counter = 0
+ self.data_counter = 0
+
+ def handle_starttag(self, tag, attrs):
+
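+        # result rows carry one of these two background colours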
+ if tag == 'tr':
+ if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
+ self.__start_processing = True
+
+ if not self.__start_processing:
+ return
+
+ if tag == 'label':
+ self.result['title'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+ elif tag == 'a' and ('rel', 'nofollow') in attrs and ('class', 'sourcelink') in attrs:
+ if 'content' in self.result:
+ self.result['content'] += [attr[1] for attr in attrs if attr[0] == 'title'][0]
+ else:
+ self.result['content'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+ self.result['content'] += ' '
+ elif tag == 'a':
+ self.result['url'] = url + [attr[1] for attr in attrs if attr[0] == 'href'][0]
+
+ def handle_endtag(self, tag):
+ if self.__start_processing is False:
+ return
+
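+        # a result spans two table rows; the second </tr> completes it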
+ if tag == 'tr':
+ self.tr_counter += 1
+
+ if self.tr_counter == 2:
+ self.__start_processing = False
+ self.tr_counter = 0
+ self.data_counter = 0
+ self.results.append(self.result)
+ self.result = {}
+
+ def handle_data(self, data):
+ if not self.__start_processing:
+ return
+
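+        # accumulate the text nodes of the current result as its content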
+ if 'content' in self.result:
+ self.result['content'] += data + ' '
+ else:
+ self.result['content'] = data + ' '
+
+ self.data_counter += 1
+
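+# do search-request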
+def request(query, params):
+    params['url'] = search_url.format(query=urlencode({'w': query}))
+ return params
+
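+# get response from search-request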
+def response(resp):
+ parser = FilecropResultParser()
+ parser.feed(resp.text)
+
+ return parser.results
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
new file mode 100644
index 000000000..e24edde56
--- /dev/null
+++ b/searx/engines/yacy.py
@@ -0,0 +1,44 @@
+from json import loads
+from urllib import urlencode
+
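+# this points to a local yacy peer; adjust it to match your installation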
+url = 'http://localhost:8090'
+search_url = '/yacysearch.json?{query}&maximumRecords=10'
+
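+# do search-request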
+def request(query, params):
+ params['url'] = url + search_url.format(query=urlencode({'query':query}))
+ return params
+
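+# get response from search-request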
+def response(resp):
+ raw_search_results = loads(resp.text)
+
+    channels = raw_search_results.get('channels', [])
+
+    if not channels:
+        return []
+
+    search_results = channels[0].get('items', [])
+
+ results = []
+
+ for result in search_results:
+ tmp_result = {}
+ tmp_result['title'] = result['title']
+ tmp_result['url'] = result['link']
+ tmp_result['content'] = ''
+
+ if len(result['description']):
+            tmp_result['content'] += result['description'] + "\n"
+
+ if len(result['pubDate']):
+            tmp_result['content'] += result['pubDate'] + "\n"
+
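+        # yacy reports '-1' when the file size is unknown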
+ if result['size'] != '-1':
+ tmp_result['content'] += result['sizename']
+
+ results.append(tmp_result)
+
+ return results