summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSolomon Peachy <pizza@shaftnet.org>2014-01-28 07:59:17 -0500
committerSolomon Peachy <pizza@shaftnet.org>2014-01-28 07:59:17 -0500
commitd36807a0ab2f8bc91788878c0a37ec4dad28120a (patch)
tree5a34e072c456df0988d579c713f62da8ba83ed61
parentdf27cb7b8ebac24c6645e0b0f31f2629adb4c74c (diff)
downloadslp_misc-d36807a0ab2f8bc91788878c0a37ec4dad28120a.tar.gz
slp_misc-d36807a0ab2f8bc91788878c0a37ec4dad28120a.tar.bz2
slp_misc-d36807a0ab2f8bc91788878c0a37ec4dad28120a.zip
Add more stuff.
-rw-r--r--Makefile11
-rwxr-xr-x[-rw-r--r--]nb2ikiwiki.pl4
-rwxr-xr-xnb2nikola.pl158
3 files changed, 170 insertions, 3 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6727ed9
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,11 @@
+# Batch-convert Nikon raw files (*.NEF) to JPEG via an intermediate PPM.
+# NOTE(review): $(shell ls *NEF) emits an error and yields an empty list
+# when no NEF files exist; $(wildcard *.NEF) is the conventional quiet form.
+FILES_IN := $(shell ls *NEF)
+FILES_OUT := $(FILES_IN:.NEF=.jpg)
+
+# Default target: one .jpg per input .NEF.
+all: $(FILES_OUT)
+
+# Decode the raw file with dcraw (-o 1 = sRGB output, -q 3 = highest-quality
+# interpolation, -w = camera white balance, -v = verbose); produces %.ppm.
+%.ppm: %.NEF
+ dcraw -o 1 -q 3 -w -v $<
+
+# Compress the intermediate PPM to JPEG at quality 85.
+# NOTE(review): the %.ppm intermediates are left on disk; marking them
+# .INTERMEDIATE would have make delete them — confirm whether intended.
+%.jpg: %.ppm
+ convert $< -quality 85 $@
+
diff --git a/nb2ikiwiki.pl b/nb2ikiwiki.pl
index 2835985..3818ad8 100644..100755
--- a/nb2ikiwiki.pl
+++ b/nb2ikiwiki.pl
@@ -162,9 +162,7 @@ close(OUT);
my $year = &UnixDate($date, "%Y");
my $month = &UnixDate($date, "%m");
mkdir("$year");
- mkdir("$year/$month");
-
- open(OUT, ">" . $year . "/" . $month . "/" . sanitize($title) . ".mdwn") or die "Unable to open article output file";
+ open(OUT, ">" . $year . "/" . sanitize($title) . ".mdwn") or die "Unable to open article output file";
print OUT $formatted_article;
close(OUT);
}
diff --git a/nb2nikola.pl b/nb2nikola.pl
new file mode 100755
index 0000000..6c20af3
--- /dev/null
+++ b/nb2nikola.pl
@@ -0,0 +1,158 @@
+#!/usr/bin/perl
+#
+# nb2nikola --- a conversion script from NanoBlogger to nikola
+#
+# Released under the HOT-BEVERAGE-OF-MY-CHOICE LICENSE: Solomon Peachy wrote
+# this script. As long you retain this notice, you can do whatever you want
+# with it. If we meet some day, and you feel like it, you can buy me a hot
+# beverage of my choice in return.
+
+use strict;
+use warnings;
+
+# HTML::WikiConverter::Markdown supplies the 'Markdown' dialect requested
+# later; Date::Manip provides ParseDateString/UnixDate for date handling.
+use HTML::WikiConverter::Markdown;
+use Date::Manip;
+
+# NanoBlogger data directory holding the *.db category databases and the
+# *.txt article files.  NOTE(review): hardcoded to the author's home tree.
+my $input_directory = "/home/pizza/nanoblogger-3.5-rc1/pizza/data";
+# Bareword dir handle IN is closed at the bottom of the script (closedir).
+opendir(IN, $input_directory) or die "Unable to open input directory";
+
+my @files = readdir(IN);
+
+# Identify database files and store tags for the filenames; the tags are forced
+# to become lowercase
+
+# Maps an article filename (e.g. "foo.txt") to a space-separated,
+# space-terminated string of category names collected from every *.db
+# file that references it.
+my %tags = ();
+foreach my $file (@files)
+{
+ # Each per-category database ends in .db; master.db is the index, skip it.
+ if($file =~ m/\.db$/i && !($file eq "master.db"))
+ {
+ open(DB, $input_directory . "/" . $file) or die "Unable to open database file";
+
+ my $category = lc(<DB>); # Category is always the first line of the file
+ chomp($category);
+ $category =~ tr/. /_/;
+
+ # Remaining lines each reference one article filename.
+ foreach my $article (<DB>)
+ {
+ # Ignore assignments of multiple tags, i.e. foo.txt>1,3. I only require the filename.
+ $article =~ m/(.*\.txt).*/;
+
+ # NOTE(review): if the match above fails, $1 silently keeps its value
+ # from the previous successful match — assumes every db line names a
+ # .txt file; confirm against the data files.
+ if(exists($tags{$1})) {
+ $tags{$1} .= $category . " ";
+ } else {
+ $tags{$1} = $category . " ";
+ }
+ }
+
+ close(DB);
+ }
+}
+
+# Process articles
+
+# Converter used for legacy (non-markdown) article bodies: emits Markdown
+# with inline-style links and images and atx (#) headers.
+# NOTE(review): indirect-object syntax ("new Class") is discouraged;
+# HTML::WikiConverter->new(...) is the conventional spelling.
+my $wc = new HTML::WikiConverter( dialect => 'Markdown',
+ link_style => 'inline',
+ image_style => 'inline',
+ header_style => 'atx');
+
+# Walk every article (*.txt) file: parse its NanoBlogger metadata header,
+# collect the body, and emit a Nikola-format .md post in the current dir.
+foreach my $file (@files)
+{
+ if($file =~ m/\.txt$/i)
+ {
+ open(ARTICLE, $input_directory . "/" . $file) or die "Unable to open article file";
+
+ # This will store the lines that belong to the actual content
+ # of the article
+ my $raw_article;
+
+ # Metadata defaults; FORMAT defaults to "raw" (converted via html2wiki
+ # below).  NOTE(review): $author is captured but never written out.
+ my $title = "";
+ my $author = "";
+ my $desc = "";
+ my $date = "";
+ my $format = "raw";
+ my $slug;
+ foreach my $line (<ARTICLE>) {
+ chomp($line);
+ $line =~ s/\s+$//; # remove trailing whitespaces
+ if ($line =~ /TITLE:\s*(.*)/) {
+ $title = $1;
+ $slug = sanitize(lc($1));
+ next;
+ }
+ if ($line =~ /AUTHOR:\s*(.*)/) {
+ $author = $1;
+ next;
+ }
+ if ($line =~ /DATE:\s*(.*)/) {
+ # NanoBlogger dates use an '@' between date and time; turn it
+ # into a space so Date::Manip can parse, then reformat.
+ $date = $1;
+ $date =~ tr/@/ /;
+ $date = &ParseDateString($date);
+ $date = &UnixDate($date, "%Y/%m/%d %T");
+# $date = &UnixDate($date, "%Y-%m-%d %T");
+# $date = &UnixDate($date, "%B %d, %Y @ %H:%M %Z");
+ next;
+ }
+ if ($line =~ /DESC:\s*(.*)/) {
+ $desc = $1;
+ next;
+ }
+ if ($line =~ /FORMAT:\s*(.*)/) {
+ $format = $1;
+ next;
+ }
+
+ # Article delimiters are hardcoded -- works for me...
+ if($line =~ m/BODY\:$/ or $line =~ m/END(-){5}$/ or $line =~ m/(-){5}$/)
+ {
+ next;
+ }
+
+ $raw_article .= $line . "\n";
+ }
+
+ close(ARTICLE);
+
+ # Write the Nikola metadata header (an HTML comment block) followed by
+ # the article body, appending the tags stored from the *.db files.
+
+ # NOTE(review): open() is unchecked here (2-arg, bareword handle), and
+ # $tags{$file} is undef for uncategorized articles, which triggers an
+ # uninitialized-value warning in the tags line — confirm intended.
+ open(OUT, ">00-" . sanitize($title) . ".md");
+ print OUT "<!--\n";
+ print OUT ".. link: \n";
+ print OUT ".. description: $desc\n";
+ print OUT ".. tags: $tags{$file}\n";
+ print OUT ".. date: $date\n";
+ print OUT ".. title: $title\n";
+ print OUT ".. slug: $slug\n";
+ print OUT ".. type: text\n";
+ print OUT "-->\n\n";
+
+ # Markdown articles pass through untouched; everything else is treated
+ # as HTML and converted to Markdown.
+ if ($format eq 'markdown') {
+ print OUT $raw_article;
+ } else {
+ print OUT $wc->html2wiki($raw_article);
+ }
+ close(OUT);
+ }
+}
+
+closedir(IN);
+
+# Sanitizes a filename, following the example of Wordpress:
+# * Convert to lowercase
+# * Remove non-alphanumeric characters
+# * Replace spaces and dashes with underscores
+# * Replace adjacent underscores with a single underscore
+# * Remove a trailing underscore
+
+sub sanitize
+{
+ my ($file) = @_;
+ my $sanitized = lc($file);
+
+ # Keep only lowercase alphanumerics, underscore, dash, and whitespace;
+ # every other character is dropped outright.
+ $sanitized =~ s/[^0-9a-z_\- \t\n\r\f]//g;
+ $sanitized =~ s/[\s\-]/_/g; # each whitespace char or dash -> underscore
+ $sanitized =~ s/__+/_/g; # collapse runs of underscores
+ $sanitized =~ s/_$//; # drop a single trailing underscore
+
+ return($sanitized);
+}