author    yvesf <yvesf@pinky.(none)>  2010-11-24 21:40:37 +0100
committer yvesf <yvesf@pinky.(none)>  2010-11-24 21:40:37 +0100
commit    708137185642f408427372300144412a4102ec38 (patch)
tree      7d7f4e766a7c94724267414a87e7dd5f527c231e /indexer.py
parent    cc80a271ba5b2b11a2c92883fbf5734fd3420aca (diff)
download  booksearch-708137185642f408427372300144412a4102ec38.tar.gz
          booksearch-708137185642f408427372300144412a4102ec38.zip
python2.5 compatibility
Diffstat (limited to 'indexer.py')
-rw-r--r--  indexer.py | 15
1 file changed, 8 insertions, 7 deletions
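The change replaces direct calls to unicode.format() with a str_format helper imported from a local compat module, because str.format() only exists from Python 2.6 on. The compat module itself is not part of this diff, so the following is only a minimal sketch of what such a helper might look like, assuming it falls back to %-style substitution on Python 2.5; the re-based conversion is an illustration, not the commit's actual implementation.

# Hypothetical sketch of compat.str_format (the real compat module is not shown here).
# On Python 2.6+ it can delegate to the built-in method; on Python 2.5 it rewrites
# "{name}" placeholders into "%(name)s" and uses classic %-formatting.
import re

def str_format(template, **kwargs):
    try:
        return template.format(**kwargs)      # available from Python 2.6 onwards
    except AttributeError:
        # Python 2.5: unicode objects have no .format() method
        converted = re.sub(r"\{(\w+)\}", r"%(\1)s", template)
        return converted % kwargs

# usage matching this diff: str_format(u"Walking {dir}", dir=directory)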
diff --git a/indexer.py b/indexer.py
index 8ab4cdf..913591a 100644
--- a/indexer.py
+++ b/indexer.py
@@ -7,7 +7,7 @@ import pyPdf
import whoosh.index as index
import whoosh.writing as writing
import whoosh.fields as fields
-import multiprocessing as mp
+from compat import str_format
schema_book = fields.Schema(
pagenumber=fields.NUMERIC(stored=True),
@@ -34,7 +34,7 @@ filepaths = []
directory = unicode(sys.argv[1], "utf8")
searcher_book = index_book.searcher()
searcher_metadata = index_metadata.searcher()
-print u"Walking {0}".format(directory)
+print str_format(u"Walking {dir}",dir=directory)
filecount = 0
skipped = 0
for path, directories, files in os.walk(directory):
@@ -47,7 +47,7 @@ for path, directories, files in os.walk(directory):
else:
#skip files that are already indexed
skipped += 1
- print u"\r{0} files found {1} skipped".format(filecount+skipped, skipped),
+ print str_format(u"\r{count} files found {skip} skipped", count=filecount+skipped, skip=skipped),
print ""
if not create_index: #update index for deleted files
@@ -62,7 +62,7 @@ if not create_index: #update index for deleted files
writer_book.delete_by_term(u'path', path)
writer_metadata.delete_by_term('path', path)
deleted += 1
- print u"\r{0} pages processed. {1} deleted".format(processed, deleted),
+ print str_format(u"\r{proc} pages processed. {deleted} deleted", proc=processed, deleted=deleted),
print ""
writer_book.commit()
writer_metadata.commit()
@@ -72,7 +72,7 @@ searcher_metadata.close()
def process_file(filepath):
try:
- print u"{0} processing {1}".format(os.getpid(), filepath)
+ print str_format(u"{pid} processing {filepath}", pid=os.getpid(), filepath=filepath)
inputfile = pyPdf.PdfFileReader(file(filepath, 'r'))
title = inputfile.getDocumentInfo().title
@@ -83,7 +83,7 @@ def process_file(filepath):
pagenumber = 1
for page in inputfile.pages:
- print u"{0} processing {1} Page {2}".format(os.getpid(), filepath, pagenumber)
+ print str_format(u"{pid} processing {filepath} Page {page}", pid=os.getpid(), filepath=filepath, page=pagenumber)
content = page.extractText()
writer_book.add_document(pagenumber=pagenumber,
path=filepath,
@@ -94,9 +94,10 @@ def process_file(filepath):
except KeyboardInterrupt:
return 'KeyboardInterrupt'
except Exception,e:
- print u"{0} failed at {1}: {2}".format(os.getpid(), filepath, e)
+ print str_format(u"{pid} failed at {filepath}: {err}", pid=os.getpid(), filepath=filepath, err=e)
try:
+ import multiprocessing as mp
pool = mp.Pool()
jobs = []
for filepath in filepaths:
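The last hunk also moves "import multiprocessing as mp" from the top of the file into the try block around the worker pool. multiprocessing entered the standard library with Python 2.6, so importing it lazily presumably lets the surrounding exception handling cope with its absence on Python 2.5. The except branch lies outside the visible hunk, so the sequential fallback below is only an assumed illustration of that guarded-import pattern (process_file and filepaths come from the diff; the pool.map call and results name are illustrative):

try:
    import multiprocessing as mp          # stdlib only since Python 2.6
    pool = mp.Pool()
    results = pool.map(process_file, filepaths)
except ImportError:
    # Python 2.5: no multiprocessing module, process files one by one instead.
    results = [process_file(path) for path in filepaths]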