# Last updated: 2022/02/10
# (A plain-text version of this script was previously published alongside it.)
#!/usr/bin/env perl
use strict;
use warnings;
our $VERSION = 'v1.0.0';
# Modules
use Cwd qw(abs_path cwd); # Core
use Config::Simple; # CPAN || dpkg libconfig-simple-perl
use Digest::SHA; # Core
use English qw(-no_match_vars); # Core
use File::Basename; # Core
use File::Find qw(finddepth); # Core
use File::Path qw(make_path); # Core
use Getopt::Long; # Core
use JSON; # CPAN || dpkg libjson-perl
use LWP::UserAgent; # CPAN || dpkg libwww-perl
use Pod::Usage; # Core
use Term::ANSIColor qw(:constants); # Core
use Term::ReadKey; # CPAN || dpkg libterm-readkey-perl
use Text::Table; # CPAN || dpkg libtext-table-perl
# Initialise data from the config file
# A previous `login` run writes the site name and API key here; if the
# file is absent, both stay undef and most subcommands will demand a login.
my $config_file = "$ENV{HOME}/.config/app-neocities/neocities.ini";
my ( $cfg, $site_name, $api_key );
if ( -e $config_file ) {
$cfg = Config::Simple->new($config_file);
$site_name = $cfg->param('site.name');
$api_key = $cfg->param('site.api_key');
}
# Initialise the UA and API URL
my $ua = LWP::UserAgent->new;
if ($api_key) {
# All authenticated Neocities endpoints accept the key as a Bearer token
$ua->default_header( 'Authorization' => "Bearer $api_key" );
}
$ua->timeout(10); # Seconds; keeps a dead API from hanging the script
$ua->env_proxy; # Honour http_proxy/https_proxy et al. from the environment
my $api = 'https://neocities.org/api/';
# Dispatch table mapping each subcommand to its handler. Plain code refs
# are used where possible; the previous "sub { \&foo() }" wrappers called
# the handler and then took a reference to its discarded return value,
# which worked only by accident.
my $actions = {
    delete  => \&delete_files,
    help    => sub { pod2usage( -verbose => 0 ) },
    info    => \&info,
    list    => \&list_files,
    login   => \&login,
    logout  => \&logout,
    man     => sub { pod2usage( -verbose => 2 ); },
    push    => \&push_files,
    upload  => \&upload_files,
    version => sub { print basename($PROGRAM_NAME) . " $VERSION\n"; exit 0; },
};
# Run the requested action, or show the usage text for a missing/unknown one
my $subcommand = shift @ARGV;
if ( !$subcommand || !defined $actions->{$subcommand} ) {
    pod2usage( -verbose => 0 );
}
$actions->{$subcommand}->();
# Given a query object, returns failure messages from the API so I don't have
# to keep typing it
# Formats an API failure response as a red, newline-terminated message.
# Takes the HTTP::Response-like query object and decodes its JSON body.
sub failure {
    my ($query) = @_;
    my $body = decode_json( $query->decoded_content );
    my $text = sprintf '[%s] %s: %s',
        uc( $body->{result} ), $body->{error_type}, $body->{message};
    return RED . $text . RESET . "\n";
}
# Pretty-prints files based on certain criteria
sub pp {
my ( $file, $is_dir ) = @_;
# Directory
if ( $is_dir == 1 ) {
return sprintf "%s$file%s", BOLD . BLUE, RESET;
}
# HTML file
if (/[.]html?$/xmsi) {
return sprintf "%s$file%s", GREEN, RESET;
}
# Image file
if (/[.](?:bmp|gif|ico|jpe?g|png|svg$)/xmsi) {
return sprintf "%s$file%s", BOLD . MAGENTA, RESET;
}
# CSS file
if (/[.]css$/xmsi) {
return sprintf "%s$file%s", BOLD . GREEN, RESET;
}
# Javascript file
if (/[.](?:js|json|geojson)/xmsi) {
return sprintf "%s$file%s", YELLOW, RESET;
}
# Text file
if (/[.](?:txt|text|[ct]sv)/xmsi) {
return sprintf "%s$file%s", BOLD . YELLOW, RESET;
}
# XML file
if (/[.]xml/xmsi) {
return sprintf "%s$file%s", BOLD . RED, RESET;
}
# Web font file
if (/[.](?:eot|ttf|woff2?)/xmsi) {
return sprintf "%s$file%s", RED, RESET;
}
# Everything else
return "$file";
}
# Given an arrayref, true or false matching, a condition, and an optional
# message on match, return a trimmed arrayref of the results
# Filters an arrayref of strings in place and returns it.
#   $truth == 1: remove elements that MATCH $condition (a qr// pattern)
#   $truth == 0: remove elements that do NOT match $condition
# $msg, if true, is printed once for every removed element.
# Fix: replaces the original stringy eval (which assembled and eval'd Perl
# source at runtime just to pick =~ vs !~) with a plain loop -- identical
# semantics, no eval, no perlcritic suppressions.
sub trim {
    my ( $files, $truth, $condition, $msg ) = @_;
    my @kept;
    for my $item ( @{$files} ) {
        my $is_match = ( $item =~ $condition ) ? 1 : 0;
        # Remove when the match result agrees with the requested truth value
        if ( $is_match == $truth ) {
            if ($msg) {
                print "$msg";
            }
            next;
        }
        push @kept, $item;
    }
    @{$files} = @kept;
    return $files;
}
# Shifts a given html file to the end of an array
sub face_files {
my ( $face, @files ) = @_;
if ( $face !~ /[.]html?$/xmsi ) {
die "Face file '$face' does not end in .htm or .html\n";
}
my $found = 0;
for my $file ( 0 .. $#files - 1 ) {
if ( $files[$file] eq $face ) {
splice @files, $file, 1;
push @files, $face;
$found = 1;
last;
}
}
if ( !$found ) {
die "Face file '$face' wasn't in the list of given files. Aborting\n";
}
return @files;
}
# Provides info on either the logged-in site or a given site
sub info {
my $query;
my $alt_site = shift @ARGV;
# If no argument is given, assume we want info on our own site
if ( !$alt_site ) {
if ( !$site_name ) {
die "Either a site name or login is needed to fetch info.\n";
}
$alt_site = $site_name;
}
$query = $ua->get( "$api" . "info?sitename=$alt_site" );
# Display the output, but only show the domain and the IPFS stuff if they
# exist, since they're uncommon.
if ( $query->is_success ) {
my $response = decode_json $query->decoded_content;
printf "Site: https://%s.neocities.org\n",
$response->{info}{sitename};
if ( $response->{info}{domain} ) {
print "Domain: $response->{info}{domain}\n";
}
if ( $response->{info}{latest_ipfs_hash} ) {
print "Latest IPFS hash: $response->{info}{latest_ipfs_hash}\n";
}
print "Created at: $response->{info}{created_at}\n";
if ( $response->{info}{last_updated} ) {
print "Last updated: $response->{info}{last_updated}\n";
}
else {
print "Last updated: Never\n";
}
printf "Tags: %s\n", join q(, ), @{ $response->{info}{tags} };
print "Views: $response->{info}{views}\n";
print "Hits: $response->{info}{hits}\n";
}
else {
print failure($query);
return 2;
}
return 1;
}
# Lists files in given directories on the logged-in site
sub list_files {
GetOptions(
'all|a' => \my $all,
'details|d' => \my $details
);
# Set the directory by looking at the remaining arguments, defaulting to '/'
# if --all is unset, and leaving it blank otherwise
my @dirs = q(); # This blank argument enables the recursive listing
if ( !$all ) {
if ( !@ARGV ) {
@dirs = q(/);
}
else {
@dirs = @ARGV;
}
}
# Ensure we have a key, then set up the query
if ( !$api_key ) {
print "A login is necessary in order to list your files.\n";
exit 3;
}
# Build a hash of all the files per section (allowing for multiple args;
# --all is one section, so rename it as such so we don't have an
# inconvenient zero-width key if we ever need to refer to it in the future)
my $files;
if ( $dirs[0] eq q() ) {
$dirs[0] = 'all';
$files->{section}{all} = get_files(q());
}
else {
## no critic [ControlStructures::ProhibitCStyleForLoops]
for ( my $i = 0; $i <= $#dirs; $i++ ) {
# Make sure to weed out bad directories
if ( my $tmp = get_files( $dirs[$i] ) ) {
$files->{section}{ $dirs[$i] } = $tmp;
}
else {
splice @dirs, $i, 1;
$i--;
}
}
}
# Process each directory
for my $dir ( 0 .. $#dirs ) {
# First, print the header, except in --details mode where it's less useful...
if ( !$details ) {
print "$files->{section}->{$dirs[$dir]}->{header}\n";
}
# ...then initialise the table headers as appropriate...
my $table;
if ($details) {
$table = Text::Table->new( BOLD . 'Name',
'Size', 'Last Modified' . RESET );
}
else {
$table = Text::Table->new();
}
# ...then add the files, saving files in / for last if applicable...
my $type = 'other_files';
LOOP:
my $file = $files->{section}->{ $dirs[$dir] }->{$type};
for my $data ( sort keys %{$file} ) {
if ($details) { # Load everything...
$table->load(
[ $file->{$data}->{path},
$file->{$data}->{size},
$file->{$data}->{updated_at}
]
);
}
else { # ...or just the path
$table->load( $file->{$data}->{path} );
}
}
if ( $type ne 'base_files'
&& exists $files->{section}->{ $dirs[$dir] }->{base_files} )
{
$table->add(q( )); # Aesthetic space for readability
$type = 'base_files';
goto LOOP;
}
# ...and then, finally, print it, avoiding a newline on the final entry
$dir == $#dirs ? print $table : print "$table\n";
}
return 1;
}
# Returns a hashref containing files from a given directory
sub get_files {
my $dir = shift || q();
my %files;
# Check for an API key first
if ( !$api_key ) {
print "A login is necessary in order to list files.\n";
exit 3;
}
my $query = $ua->get( "$api" . "list?path=$dir" );
if ( $query->is_success ) {
my $response = decode_json $query->decoded_content;
if ( !@{ $response->{files} } ) {
print "The directory '$dir' is either empty or nonexistent.\n";
return 0;
}
# Provide a header so the user knows where they are
if ( $dir ne q() && $dir ne q(/) ) {
$files{header} = BOLD . "[$dir]" . RESET;
}
else {
$files{header} = BOLD . '[root]' . RESET;
}
# Get the requested data and pretty-print it
for my $files ( @{ $response->{files} } ) {
for ( sort $files->{path} ) {
if ( $files->{is_directory} == 0 && !/[\/]/xms )
{ # Files in the root have no path separators
$files{base_files}{$_}{path} = pp( $_, 0 );
$files{base_files}{$_}{size} = $files->{size} || 0;
$files{base_files}{$_}{updated_at} = $files->{updated_at};
$files{base_files}{$_}{sha1_hash}
= $files->{sha1_hash} || 0;
}
else {
$files{other_files}{$_}{path}
= pp( $_, $files->{is_directory} );
$files{other_files}{$_}{size} = $files->{size} || 0;
$files{other_files}{$_}{updated_at}
= $files->{updated_at};
$files{other_files}{$_}{sha1_hash}
= $files->{sha1_hash} || 0;
}
}
}
}
# ...otherwise, politely fail.
else {
print failure($query);
next;
}
return \%files;
}
# Creates a config file
sub login {
# Sanity check
if ($api_key) {
print "An API key is already present. Try logging out first.\n";
exit 3;
}
# Prompt for user data...
print "What is your user name/site name?\n";
$site_name = <>;
chomp $site_name;
print "What is your password?\n";
ReadMode 2;
my $password = <>;
chomp $password;
ReadMode 0;
# ...prepare the request...
my $request = HTTP::Request->new( GET => $api . 'key' );
# ...add authentication...
$request->authorization_basic( $site_name, $password );
# ...and pull it all together
my $query = $ua->request($request);
if ( $query->is_success ) {
my $response = decode_json $query->decoded_content;
$cfg = Config::Simple->new( syntax => 'ini' );
$cfg->param( 'site.name', $site_name );
$cfg->param( 'site.api_key', $response->{api_key} );
if ( !-e dirname($config_file) ) {
make_path( dirname($config_file) )
or die 'Unable to create '
. dirname($config_file)
. ": $ERRNO\n";
}
$cfg->write($config_file)
|| die "Unable to write $config_file: $ERRNO\n";
}
else {
print failure($query);
return 2;
}
print "Config file written to $config_file\n";
return 1;
}
# Removes the site name and API key from the config file
sub logout {
if ( -e $config_file ) {
if ( !$cfg->param('site.name') ) {
print "Site name already cleared\n";
}
if ( !$cfg->param('site.api_key') ) {
print "API key already cleared\n";
}
$cfg->delete('site.name');
$cfg->delete('site.api_key');
$cfg->write();
print "Logout successful\n";
}
else {
print "$config_file doesn't exist. Nothing to do. Exiting.\n";
}
return 1;
}
# Deletes given files from the logged-in site
sub delete_files {
GetOptions(
'dry-run|d' => \my $dry_run,
'regex|r:s' => \my $regex,
'case-insensitive|i' => \my $case_insensitive
);
# Get files either from @ARGV or the site
my @files;
if ($regex) {
my $remote_listing = get_files();
for (
keys %{ $remote_listing->{base_files} },
keys %{ $remote_listing->{other_files} }
)
{
push @files, $_;
}
@files = sort @files;
}
else {
@files = sort @ARGV;
}
# If needed, use regex to decide what files to kill
if ($regex) {
if ($case_insensitive) {
$regex = qr/$regex/ixms;
}
else {
$regex = qr/$regex/xms;
}
@files = @{ trim( \@files, '0', $regex ) };
}
# Ensure that index.html isn't targeted, to avoid issues later
@files = @{
trim( \@files, '1', qr/^index.html$/ixms,
sprintf BOLD
. YELLOW
. '[WARNING] '
. RESET
. "index.html cannot be deleted. Removing it from the list.\n"
)
};
# Last-minute error-checking
if ( !@files ) {
print "No (valid) files were provided.\n";
return 2;
}
# Do a dry-run if requested, then return
if ($dry_run) {
print "Simulating...\n";
print "The following deletions would be attempted:\n";
for (@files) {
print "$_\n";
}
return 1;
}
# Otherwise, ensure we have a key, then execute the query
if ( !$api_key ) {
print "A login is necessary in order to delete your files.\n";
exit 3;
}
my $query = $ua->post( $api . 'delete', { 'filenames[]' => \@files } );
if ( $query->is_success ) {
print "File(s) successfully deleted: \n";
for (@files) {
print "$_\n";
}
}
else {
print failure($query);
return 2;
}
return 1;
}
# Recursively uploads files
sub push_files {
GetOptions(
'dry-run|d' => \my $dry_run,
'face|f:s' => \my $face,
'regex|r:s' => \my $regex,
'case-insensitive|i' => \my $case_insensitive
);
# Recursively get files using absolute paths relative to the supplied
# directory as well as the sha1 hash of each file
my $dir;
if (@ARGV) {
$dir = abs_path( shift @ARGV );
}
else {
$dir = cwd;
}
# First, set up the regex if needed
if ($regex) {
if ($case_insensitive) {
$regex = qr/$regex/ixms;
}
else {
$regex = qr/$regex/xms;
}
}
# Then, gather the files
my %local_files;
finddepth(
sub {
return if (-d);
my $base = basename($dir);
# Construct the filename, then chop off everything up through
# the cwd, omitting the (new) leading slash...
( my $file_name = $File::Find::dir . "/$_" )
=~ s{.* \Q$base\E /}{}xms;
# ...run it through the regex if requested...
if ( !$regex || $file_name =~ /$regex/xms ) {
# ...and calculate the sha1.
$local_files{$file_name}{sha1}
= Digest::SHA->new(1)->addfile($File::Find::name)
->hexdigest;
}
},
$dir
);
# Same as above, but for the remote files
my $remote_files = get_files(q());
my @files;
# Now, create the list of local files that are (potentially) eligible for
# upload by comparing sha1 signatures
for my $file_name ( keys %local_files ) {
if ( !exists $remote_files->{$file_name}{sha1}
|| $local_files{file_name}{sha1} ne
$remote_files->{$file_name}{sha1} )
{
push @files, $file_name;
}
}
# If a face file is requested, make it so
if ($face) {
@files = face_files( $face, @files );
}
# Do a dry-run if requested, then return...
if ($dry_run) {
print "Simulating...\n";
print "The following uploads would be attempted:\n";
for (@files) {
print "$_\n";
}
return 1;
}
# ...and call upload_files() to do the heavy lifting
upload_files(@files);
return 1;
}
# Uploads individual files, optionally preserving paths or putting the group in
# a given directory
## no critic [Subroutines::RequireArgUnpacking] # Have patience
# Uploads individual files to the logged-in site. Two calling modes:
#   - upload_files(@files): internal (from push_files); implies preserved
#     paths, as if --preserve-paths had been given.
#   - upload_files() with no args: CLI mode; options and file names are
#     read from @ARGV via GetOptions.
# Returns 1 on success; exits 2 if any upload failed, 3 without a key.
# NOTE(review): if push_files() finds nothing to upload, it still calls
# this with an empty list, which falls into the CLI branch and re-parses
# @ARGV -- confirm that's the intended behaviour.
sub upload_files {
my @files;
my ( $fails, $dry_run, $face, $preserve, $target_dir );
# Anything calling this directly should act like push_files(), which
# implies preserved paths and recursion
if (@_) {
@files = @_;
$preserve = 1;
}
# Otherwise, respect the options and operate normally
else {
GetOptions(
'dry-run|d' => \$dry_run,
'face|f:s' => \$face,
'preserve-paths|p' => \$preserve,
'target-dir|t:s' => \$target_dir,
);
@files = @ARGV;
}
# If a face file is requested, make it so.
if ($face) {
@files = face_files( $face, @files );
}
if ($dry_run) {
print "Simulating...\n";
print "The following uploads would be attempted:\n";
}
# Process the files
for my $local_file (@files) {
# Resolve the canonical path
$local_file = abs_path($local_file);
my $remote_file = $local_file;
# Strip the paths unless told not to...
if ( !$preserve ) {
$remote_file = basename($local_file);
}
else {
# ...in which case, strip the path through the cwd...
my $base = cwd;
$remote_file =~ s{.* \Q$base\E /}{}xms;
}
# ...and apply any path modifier, removing extra / characters
if ($target_dir) {
$remote_file = $target_dir . "/$remote_file";
$remote_file =~ s/[\/]{2,}/\//gxms;
}
if ($dry_run) {
print "$local_file as /$remote_file\n";
next;
}
# Check for an API key
if ( !$api_key ) {
print "A login is necessary in order to upload files.\n";
exit 3;
}
# Set up the query -- note the unique headers. The arrayref value tells
# HTTP::Request::Common to read the local file's contents and send them
# as a multipart form-data file upload under the remote name.
my $query = $ua->post(
$api . 'upload',
Content_Type => 'form-data',
Content => [ $remote_file => [$local_file], ],
);
if ( $query->is_success ) {
print "$local_file uploaded successfully to /$remote_file\n";
}
else {
print failure($query);
$fails++;
}
}
# Conclude gracefully one way or another
if ($fails) {
print "Total failed uploads: $fails\n";
exit 2;
}
return 1;
}
__END__
# Documentation
=pod
=head1 NAME
Neocities.pl - Neocities CLI program
=head1 USAGE
perl neocities.pl [mode] [OPTION...]
info <site> Gets info on the logged-in or specified site
list <dirs> Lists files in root or the given directories
-a, --all Lists all files recursively starting from the root
-d, --details Also shows the file size in bytes and the modification time
delete [files] Deletes the specified files from the logged-in site
-d, --dry-run Shows the anticipated results but doesn't actually do anything
-r, --regex Provides a regex to match files with instead; see 'man' for details
-i, --case-insensitive Makes a provided regex case-insensitive; useless on its own
upload <files> Uploads specified files to the logged-in site's root directory
-d, --dry-run Shows the anticipated results but doesn't actually do anything
-f, --face <HTML file> Ensures the given file is uploaded last (for the social media aspect)
-p, --preserve-paths Preserves the files' path on the remote site
-t, --target-dir Puts all files into a specified dir instead; can be used with -p
push <dir> Uploads files recursively from the given directory to the logged-in site
-d, --dry-run Shows the anticipated results but doesn't actually do anything
-f, --face <HTML file> Ensures the given file is uploaded last (for the social media aspect)
-r, --regex Provides a regex to match files with instead; see 'man' for details
-i, --case-insensitive Makes a provided regex case-insensitive; useless on its own
login Prompts for login info to get an API key; necessary for most operations
logout Removes the site name and API key from the config file
help Display this help text
man Displays the full embedded manual
version Displays the version and then exits
=head1 DESCRIPTION
Neocities.pl is an enhanced replacement for the "neocities" Ruby client
tailored to work more easily with the author's preferred workflow. It offers
all the features of the original, but has some different assumptions baked-in,
such as the idea that the working directory is intended to mirror the structure
on the remote site, and that there's a build process that produces HTML files
which .gitignore is expected to ignore.
=head1 REQUIRED ARGUMENTS
Initial usage of "login" is required for all meaningful operations except for
fetching public site details.
=head1 OPTIONS
=head2 list
-a, --all
-d, --details
If --all is used, any directories given will be ignored and instead a complete
site listing will be returned. It is worth noting that the root directory is
currently treated specially: files contained within will be listed last, to
avoid getting them lost in the listing. This currently applies with or without
--all.
If --details is used, all files will be listed in columns detailing the name,
file size, and modification time.
In all cases, output is colourised as follows:
* Directories are light blue.
* HTML files are dark green.
* Image files are light magenta.
* CSS file are light green.
* Javascript files are brown.
* Text files are light yellow.
* XML files are light red.
* Web font files (except svg) are dark red.
* Everything else is light grey.
The colours are essentially arbitrary.
=head2 delete
-d, --dry-run
-r, --regex
-i, --case-insensitive
If --regex is used, all file arguments will be ignored, as the regex will be
used against a full listing of files from the remote site. If
--case-insensitive is specified but --regex is not, the former will be silently
ignored.
Any valid perlre pattern can theoretically be used. Do not include the
delimiters or any modifiers. Any expression provided is assumed to use the x,
m, and s modifiers. With --case-insensitive specified, the i modifier is
included as well.
In order to facilitate safe experimentation, remember that --dry-run will print
a list of selected files and then abort.
For further information, consult L<perlre|perlre>.
=head2 upload
-d, --dry-run
-f, --face
-p, --preserve-path
-t, --target-dir
Normally, the upload command puts all supplied files into the root directory of
the site. However, if --preserve-path is used, the files' path is preserved.
This path is relative to both the remote root and the local supplied file.
Using --target-dir will essentially change what the remote root directory is
considered to be, and can be freely combined with --preserve-path as desired.
For instance, given a file called "./foo/bar/baz.html":
Default settings:
* The remote root path is /.
* The local file is regarded as baz.html.
* Therefore, the remote file will be /baz.html.
With --preserve-path:
* The remote root path is /.
* The local file is regarded as ./foo/bar/baz.html.
* Therefore, the remote file will be /foo/bar/baz.html. /foo and /bar will
both be created if they do not already exist.
With --target-dir set to "mydir":
* The remote root path is /mydir.
* The local file is regarded as baz.html.
* Therefore, the remote file will be /mydir/baz.html.
With both settings enabled as above:
* The remote root path is /mydir.
* The local file is regarded as /foo/bar/baz.html.
* Therefore, the remote file will be /mydir/foo/bar/baz.html. All three
directories will be created if they do not already exist.
As such, be very careful where you invoke this program from. Using the
--dry-run option will help ensure that everything is the way you're meaning it
to be before doing the real thing.
Additionally, in order to accommodate people who care about the "face" of their
posts in Neocities' social media display, the --face option will take a given
HTML file and ensure that it is placed at the end of the list of files to
upload, which ensures that it is the primary page shown in the social media
feed. Giving a filename that isn't present in the list or an HTML file is a
fatal error. Specifically, the filename must end in .htm or .html.
Note that this feature is intended primarily to facilitate the use of globs or
the "push" command (detailed below). When supplying all files by their full
name, this switch isn't really needed, as this program does not alter the order
of the files it receives (with the obvious exception of this switch).
=head2 push
-d, --dry-run
-f, --face
-r, --regex
-i, --case-insensitive
This function is designed specifically with the assumption that you're
mirroring a given directory starting from root. For instance, if your CWD is
~/website, and you invoke this command on ~/website/img, then the files will be
uploaded to /img.
The options work essentially identically to their "upload" and "delete"
counterparts respectively. As such, all advice given there applies here.
Given that all paths are, as mentioned, treated relatively from the given path
(or the current directory if unspecified), the advice for "upload" also
applies.
=head1 DIAGNOSTICS
Errors are explicitly specified in the output.
=head1 EXIT STATUS
0 is some form of success
1 is a bad query
2 is a partial success when uploading files
3 is a missing API key
Anything else is something otherwise fatal
=head1 CONFIGURATION
Initial configuration is done by invoking the "login" routine, which currently
asks for a site name and password, storing the former and trading the latter
for an API key.
The file itself is currently stored as
"$HOME/.config/app-neocities/neocities.ini".
=head1 DEPENDENCIES
Core (these are probably already installed):
* Cwd
* Digest::SHA
* English
* File::Basename
* File::Find
* File::Path
* Getopt::Long
* Pod::Usage
* Term::ANSIColor
Non-core (search your package manager or use CPAN):
* Config::Simple
* JSON
* LWP::UserAgent
* Term::ReadKey
* Text::Table
=head1 INCOMPATIBILITIES
No provision is given for owning multiple sites at this point, mostly because I
can't test it. People in that situation can likely make better use of WebDAV's
remote mounting anyway.
=head1 BUGS AND LIMITATIONS
If you named your files with dashes in front, it probably won't work. Don't do
that.
Report any other bugs found to the author. Alternatively, useful patches are
welcome, but not necessarily guaranteed to be applied.
=head1 AUTHOR
ShadowM00n <shadowm00n@airmail.cc>
=head1 LICENSE AND COPYRIGHT
Copyright 2021 ShadowM00n
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
=cut