import java.io.IOException;
import java.io.InputStream;
+import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Scanner;
import java.util.Map.Entry;
+import org.json.JSONException;
+import org.json.JSONObject;
import org.jsoup.helper.DataUtil;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
Story story = new Story();
MetaData meta = getMeta();
- if (meta.getCreationDate() == null || meta.getCreationDate().isEmpty()) {
- meta.setCreationDate(StringUtils.fromTime(new Date().getTime()));
+ if (meta.getCreationDate() == null
+ || meta.getCreationDate().trim().isEmpty()) {
+ meta.setCreationDate(bsHelper
+ .formatDate(StringUtils.fromTime(new Date().getTime())));
}
story.setMeta(meta);
pg.put("meta", meta);
return story;
}
+	/**
+	 * Utility method to convert the resource at the given URL (given as a
+	 * {@link String}) into a JSON object.
+	 * <p>
+	 * Note that this method expects small JSON files (everything is copied
+	 * into memory at least twice).
+	 *
+	 * @param url
+	 *            the URL to parse
+	 * @param stable
+	 *            TRUE for more stable resources, FALSE when they often change
+	 *
+	 * @return the JSON object
+	 *
+	 * @throws IOException
+	 *             in case of I/O error; a malformed URL is reported as an
+	 *             {@link IOException} wrapping the original cause
+	 */
+	protected JSONObject getJson(String url, boolean stable)
+			throws IOException {
+		// Validate/parse the URL first, then delegate to the URL variant
+		URL parsed;
+		try {
+			parsed = new URL(url);
+		} catch (MalformedURLException e) {
+			throw new IOException("Malformed URL: " + url, e);
+		}
+
+		return getJson(parsed, stable);
+	}
+
+	/**
+	 * Utility method to convert the given URL into a JSON object.
+	 * <p>
+	 * Note that this method expects small JSON files (everything is copied into
+	 * memory at least twice).
+	 * <p>
+	 * The content is decoded as UTF-8, as required for JSON by RFC 8259.
+	 *
+	 * @param url
+	 *            the URL to parse
+	 * @param stable
+	 *            TRUE for more stable resources, FALSE when they often change
+	 *
+	 * @return the JSON object, never NULL
+	 *
+	 * @throws IOException
+	 *             in case of I/O error, if the resource is empty or if its
+	 *             content is not valid JSON
+	 */
+	protected JSONObject getJson(URL url, boolean stable) throws IOException {
+		InputStream in = Instance.getInstance().getCache().open(url, null,
+				stable);
+		try {
+			// Force UTF-8: the no-charset constructor would use the platform
+			// default charset, which corrupts non-ASCII JSON content
+			Scanner scan = new Scanner(in, "UTF-8");
+			// NUL never appears in JSON text, so this reads the whole stream
+			// as a single token
+			scan.useDelimiter("\0");
+			try {
+				if (!scan.hasNext()) {
+					// scan.next() would throw an unchecked
+					// NoSuchElementException on an empty stream; report it as
+					// the I/O failure it really is
+					throw new IOException("Empty JSON resource: " + url);
+				}
+
+				return new JSONObject(scan.next());
+			} catch (JSONException e) {
+				throw new IOException("Invalid JSON content at: " + url, e);
+			} finally {
+				scan.close();
+			}
+		} finally {
+			in.close();
+		}
+	}
+
/**
* Process the given story resource into a fully filled {@link Story}
* object.
sourceNode = loadDocument(source);
try {
- return doProcess(pg);
+ Story story = doProcess(pg);
+
+ // Check for "no chapters" stories
+ if (story.getChapters().isEmpty()
+ && story.getMeta().getResume() != null
+ && !story.getMeta().getResume().getParagraphs().isEmpty()) {
+ Chapter resume = story.getMeta().getResume();
+ resume.setName("");
+ resume.setNumber(1);
+ story.getChapters().add(resume);
+ story.getMeta().setWords(resume.getWords());
+
+ String descChapterName = Instance.getInstance().getTrans()
+ .getString(StringId.DESCRIPTION);
+ resume = new Chapter(0, descChapterName);
+ story.getMeta().setResume(resume);
+ }
+
+ return story;
} finally {
close();
}
story.setChapters(new ArrayList<Chapter>());
List<Entry<String, URL>> chapters = getChapters(pgGetChapters);
pgGetChapters.done(); // 20%
-
+
if (chapters != null) {
Progress pgChaps = new Progress("Extracting chapters", 0,
chapters.size() * 300);
words += cc.getWords();
story.getChapters().add(cc);
- story.getMeta().setWords(words);
i++;
}
+
+ story.getMeta().setWords(words);
pgChaps.setName("Extracting chapters");
pgChaps.done();
* the chapter name
* @param content
* the content of the chapter
- * @return the {@link Chapter}
+ *
+ * @return the {@link Chapter}, never NULL
*
* @throws IOException
* in case of I/O error