{config, lib, pkgs, ...}:

with lib;

let
  cfg = config.services.docspell-joex;
  user = if cfg.runAs == null then "docspell" else cfg.runAs;
  configFile = pkgs.writeText "docspell-joex.conf" ''
    {"docspell": { "joex":
      ${builtins.toJSON cfg}
    }}
  '';
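
  # For reference, cfg is serialized verbatim with builtins.toJSON and wrapped
  # in the "docspell.joex" namespace the application reads. A rough,
  # illustrative sketch of the result (keys abbreviated, not literal output):
  #
  #   {"docspell": { "joex":
  #     {"app-id":"joex1","base-url":"http://localhost:7878","bind":{"address":"localhost","port":7878},...}
  #   }}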

  defaults = {
    app-id = "joex1";
    base-url = "http://localhost:7878";
    bind = {
      address = "localhost";
      port = 7878;
    };
    mail-debug = false;
    jdbc = {
      url = "jdbc:h2:///tmp/docspell-demo.db;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE;AUTO_SERVER=TRUE";
      user = "sa";
      password = "";
    };
    send-mail = {
      list-id = "";
    };
    user-tasks = {
      scan-mailbox = {
        max-folders = 50;
        mail-chunk-size = 50;
        max-mails = 500;
      };
    };
    scheduler = {
      pool-size = 2;
      counting-scheme = "4,1";
      retries = 2;
      retry-delay = "1 minute";
      log-buffer-size = 500;
      wakeup-period = "30 minutes";
    };
    periodic-scheduler = {
      wakeup-period = "10 minutes";
    };
    house-keeping = {
      schedule = "Sun *-*-* 00:00:00";
      cleanup-invites = {
        enabled = true;
        older-than = "30 days";
      };
      cleanup-jobs = {
        enabled = true;
        older-than = "30 days";
        delete-batch = 100;
      };
      cleanup-remember-me = {
        enabled = true;
        older-than = "30 days";
      };
    };
    extraction = {
      pdf = {
        min-text-len = 500;
      };
      preview = {
        dpi = 32;
      };
      ocr = {
        max-image-size = 14000000;
        page-range = {
          begin = 10;
        };
        ghostscript = {
          working-dir = "/tmp/docspell-extraction";
          command = {
            program = "${pkgs.ghostscript}/bin/gs";
            args = [ "-dNOPAUSE" "-dBATCH" "-dSAFER" "-sDEVICE=tiffscaled8" "-sOutputFile={{outfile}}" "{{infile}}" ];
            timeout = "5 minutes";
          };
        };
        unpaper = {
          command = {
            program = "${pkgs.unpaper}/bin/unpaper";
            args = [ "{{infile}}" "{{outfile}}" ];
            timeout = "5 minutes";
          };
        };
        tesseract = {
          command = {
            program = "${pkgs.tesseract4}/bin/tesseract";
            args = [ "{{file}}" "stdout" "-l" "{{lang}}" ];
            timeout = "5 minutes";
          };
        };
      };
    };
    text-analysis = {
      max-length = 10000;
      nlp = {
        mode = "full";
        clear-interval = "15 minutes";
        regex-ner = {
          max-entries = 1000;
          file-cache-time = "1 minute";
        };
      };
      classification = {
        enabled = true;
        item-count = 0;
        classifiers = [
          { "useSplitWords" = "true";
            "splitWordsTokenizerRegexp" = ''[\p{L}][\p{L}0-9]*|(?:\$ ?)?[0-9]+(?:\.[0-9]{2})?%?|\s+|.'';
            "splitWordsIgnoreRegexp" = ''\s+'';
            "useSplitPrefixSuffixNGrams" = "true";
            "maxNGramLeng" = "4";
            "minNGramLeng" = "1";
            "splitWordShape" = "chris4";
            "intern" = "true";
          }
        ];
      };
      working-dir = "/tmp/docspell-analysis";
    };
    processing = {
      max-due-date-years = 10;
    };
    convert = {
      chunk-size = 524288;
      converted-filename-part = "converted";
      max-image-size = 14000000;
      markdown = {
        internal-css = ''
          body { padding: 2em 5em; }
        '';
      };
      wkhtmlpdf = {
        command = {
          program = "${pkgs.wkhtmltopdf}/bin/wkhtmltopdf";
          args = [ "-s" "A4" "--encoding" "UTF-8" "-" "{{outfile}}" ];
          timeout = "2 minutes";
        };
        working-dir = "/tmp/docspell-convert";
      };
      tesseract = {
        command = {
          program = "${pkgs.tesseract4}/bin/tesseract";
          args = [ "{{infile}}" "out" "-l" "{{lang}}" "pdf" "txt" ];
          timeout = "5 minutes";
        };
        working-dir = "/tmp/docspell-convert";
      };
      unoconv = {
        command = {
          program = "${pkgs.unoconv}/bin/unoconv";
          args = [ "-f" "pdf" "-o" "{{outfile}}" "{{infile}}" ];
          timeout = "2 minutes";
        };
        working-dir = "/tmp/docspell-convert";
      };
      ocrmypdf = {
        enabled = true;
        command = {
          program = "${pkgs.ocrmypdf}/bin/ocrmypdf";
          args = [
            "-l" "{{lang}}"
            "--skip-text"
            "--deskew"
            "-j" "1"
            "{{infile}}"
            "{{outfile}}"
          ];
          timeout = "5 minutes";
        };
        working-dir = "/tmp/docspell-convert";
      };
    };
    files = {
      chunk-size = 524288;
      valid-mime-types = [];
    };
    full-text-search = {
      enabled = false;
      solr = {
        url = "http://localhost:8983/solr/docspell";
        commit-within = 1000;
        log-verbose = false;
        def-type = "lucene";
        q-op = "OR";
      };
      migration = {
        index-all-chunk = 10;
      };
    };
  };
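
  # These values mirror the application's built-in defaults; every option below
  # falls back to the corresponding entry here when left unset. A minimal,
  # illustrative host configuration (hostname and database are assumptions)
  # could look like:
  #
  #   services.docspell-joex = {
  #     enable = true;
  #     base-url = "http://docspell.example.com:7878";
  #     bind.address = "0.0.0.0";
  #     jdbc.url = "jdbc:postgresql://localhost:5432/docspell";
  #   };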
in {

  ## interface
  options = {
    services.docspell-joex = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = "Whether to enable the docspell job executor.";
      };
      runAs = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = ''
          Specify a user for running the application. If null, a new
          user is created.
        '';
      };
      waitForTarget = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = ''
          If not null, joex depends on this systemd target. This is
          useful if full-text-search is enabled and the solr instance
          is running on the same machine.
        '';
      };
      jvmArgs = mkOption {
        type = types.listOf types.str;
        default = [];
        example = [ "-J-Xmx1G" ];
        description = "The options passed to the executable for setting jvm arguments.";
      };

      app-id = mkOption {
        type = types.str;
        default = defaults.app-id;
        description = "The node id. Must be unique across all docspell nodes.";
      };

      base-url = mkOption {
        type = types.str;
        default = defaults.base-url;
        description = "The base url where this app is deployed.";
      };

      bind = mkOption {
        type = types.submodule({
          options = {
            address = mkOption {
              type = types.str;
              default = defaults.bind.address;
              description = "The address to bind the REST server to.";
            };
            port = mkOption {
              type = types.int;
              default = defaults.bind.port;
              description = "The port to bind the REST server to.";
            };
          };
        });
        default = defaults.bind;
        description = "Address and port to bind the REST server to.";
      };

      mail-debug = mkOption {
        type = types.bool;
        default = defaults.mail-debug;
        description = ''
          Enable or disable debugging for e-mail related functionality. This
          applies to both sending and receiving mails. For security reasons
          logging is not very extensive on authentication failures. Setting
          this to true results in a lot of data printed to stdout.
        '';
      };

      jdbc = mkOption {
        type = types.submodule ({
          options = {
            url = mkOption {
              type = types.str;
              default = defaults.jdbc.url;
              description = ''
                The URL to the database. By default a file-based database is
                used. It should also work with mariadb and postgresql.

                Examples:
                "jdbc:mariadb://192.168.1.172:3306/docspell"
                "jdbc:postgresql://localhost:5432/docspell"
                "jdbc:h2:///home/dbs/docspell.db;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE;AUTO_SERVER=TRUE"
              '';
            };
            user = mkOption {
              type = types.str;
              default = defaults.jdbc.user;
              description = "The user name to connect to the database.";
            };
            password = mkOption {
              type = types.str;
              default = defaults.jdbc.password;
              description = "The password to connect to the database.";
            };
          };
        });
        default = defaults.jdbc;
        description = "Database connection settings";
      };

      send-mail = mkOption {
        type = types.submodule({
          options = {
            list-id = mkOption {
              type = types.str;
              default = defaults.send-mail.list-id;
              description = ''
                This is used as the List-Id e-mail header when mails are sent
                from docspell to its users (example: for notification mails). It
                is not used when sending to external recipients. If it is empty,
                no such header is added. Using this header is often useful when
                filtering mails.

                It should be a string in angle brackets. See
                https://tools.ietf.org/html/rfc2919 for a formal specification.
              '';
            };
          };
        });
        default = defaults.send-mail;
        description = "Settings for sending mails.";
      };
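
      # An illustrative value for the List-Id header, following the RFC 2919
      # angle-bracket form (the domain is a placeholder):
      #
      #   services.docspell-joex.send-mail.list-id = "<docspell.example.com>";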

      scheduler = mkOption {
        type = types.submodule({
          options = {
            pool-size = mkOption {
              type = types.int;
              default = defaults.scheduler.pool-size;
              description = "Number of jobs processed in parallel.";
            };
            counting-scheme = mkOption {
              type = types.str;
              default = defaults.scheduler.counting-scheme;
              description = ''
                A counting scheme determines the ratio of how high- and low-prio
                jobs are run. For example: 4,1 means run 4 high prio jobs, then
                1 low prio and then start over.
              '';
            };
            retries = mkOption {
              type = types.int;
              default = defaults.scheduler.retries;
              description = ''
                How often a failed job should be retried until it enters failed
                state. If a job fails, it becomes "stuck" and will be retried
                after a delay.
              '';
            };
            retry-delay = mkOption {
              type = types.str;
              default = defaults.scheduler.retry-delay;
              description = ''
                The delay until the next try is performed for a failed job. This
                delay is increased exponentially with the number of retries.
              '';
            };
            log-buffer-size = mkOption {
              type = types.int;
              default = defaults.scheduler.log-buffer-size;
              description = ''
                The queue size of log statements from a job.
              '';
            };
            wakeup-period = mkOption {
              type = types.str;
              default = defaults.scheduler.wakeup-period;
              description = ''
                If no job is left in the queue, the scheduler will wait until a
                notify is requested (using the REST interface). To also retry
                stuck jobs, it will notify itself periodically.
              '';
            };
          };
        });
        default = defaults.scheduler;
        description = "Settings for the scheduler";
      };

      periodic-scheduler = mkOption {
        type = types.submodule({
          options = {
            wakeup-period = mkOption {
              type = types.str;
              default = defaults.periodic-scheduler.wakeup-period;
              description = ''
                A fallback to start looking for due periodic tasks regularly.
                Usually joex instances should be notified via REST calls if
                external processes change tasks. But these requests may get
                lost.
              '';
            };
          };
        });
        default = defaults.periodic-scheduler;
        description = ''
          Settings for the periodic scheduler.
        '';
      };

      user-tasks = mkOption {
        type = types.submodule({
          options = {
            scan-mailbox = mkOption {
              type = types.submodule({
                options = {
                  max-folders = mkOption {
                    type = types.int;
                    default = defaults.user-tasks.scan-mailbox.max-folders;
                    description = ''
                      A limit of how many folders to scan through. If a user
                      configures more than this, only up to this limit folders are
                      scanned and a warning is logged.
                    '';
                  };
                  mail-chunk-size = mkOption {
                    type = types.int;
                    default = defaults.user-tasks.scan-mailbox.mail-chunk-size;
                    description = ''
                      How many mails (headers only) to retrieve in one chunk.

                      If this is greater than `max-mails' it is set automatically to
                      the value of `max-mails'.
                    '';
                  };
                  max-mails = mkOption {
                    type = types.int;
                    default = defaults.user-tasks.scan-mailbox.max-mails;
                    description = ''
                      A limit on how many mails to process in one job run. This is
                      meant to avoid too heavy resource allocation to one
                      user/collective.

                      If more than this number of mails is encountered, a warning is
                      logged.
                    '';
                  };
                };
              });
              default = defaults.user-tasks.scan-mailbox;
              description = "Allows importing e-mails by scanning a mailbox.";
            };
          };
        });
        default = defaults.user-tasks;
        description = "Configuration for the user tasks.";
      };

      house-keeping = mkOption {
        type = types.submodule({
          options = {
            schedule = mkOption {
              type = types.str;
              default = defaults.house-keeping.schedule;
              description = ''
                When the house keeping tasks execute. Default is to run every
                week.
              '';
            };
            cleanup-invites = mkOption {
              type = types.submodule({
                options = {
                  enabled = mkOption {
                    type = types.bool;
                    default = defaults.house-keeping.cleanup-invites.enabled;
                    description = "Whether this task is enabled.";
                  };
                  older-than = mkOption {
                    type = types.str;
                    default = defaults.house-keeping.cleanup-invites.older-than;
                    description = "The minimum age of invites to be deleted.";
                  };
                };
              });
              default = defaults.house-keeping.cleanup-invites;
              description = ''
                This task removes invitation keys that have been created but not
                used. The timespan here must be greater than the `invite-time'
                setting in the rest server config file.
              '';
            };
            cleanup-jobs = mkOption {
              type = types.submodule({
                options = {
                  enabled = mkOption {
                    type = types.bool;
                    default = defaults.house-keeping.cleanup-jobs.enabled;
                    description = "Whether this task is enabled.";
                  };
                  older-than = mkOption {
                    type = types.str;
                    default = defaults.house-keeping.cleanup-jobs.older-than;
                    description = ''
                      The minimum age of jobs to delete. It is matched against the
                      `finished' timestamp.
                    '';
                  };
                  delete-batch = mkOption {
                    type = types.int;
                    default = defaults.house-keeping.cleanup-jobs.delete-batch;
                    description = ''
                      This defines how many jobs are deleted in one transaction.
                      Since the data to delete may get large, it can be configured
                      whether more or less memory should be used.
                    '';
                  };
                };
              });
              default = defaults.house-keeping.cleanup-jobs;
              description = ''
                Jobs store their log output in the database. Normally this data
                is only interesting for some period of time. The processing logs
                of old files can be removed eventually.
              '';
            };
            cleanup-remember-me = mkOption {
              type = types.submodule({
                options = {
                  enabled = mkOption {
                    type = types.bool;
                    default = defaults.house-keeping.cleanup-remember-me.enabled;
                    description = "Whether this task is enabled.";
                  };
                  older-than = mkOption {
                    type = types.str;
                    default = defaults.house-keeping.cleanup-remember-me.older-than;
                    description = "The minimum age of remember me tokens to delete.";
                  };
                };
              });
              default = defaults.house-keeping.cleanup-remember-me;
              description = "Settings for cleaning up remember me tokens.";
            };
          };
        });
        default = defaults.house-keeping;
        description = ''
          Docspell uses periodic house keeping tasks, like cleaning expired
          invites, that can be configured here.
        '';
      };
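
      # The schedule is a calendar-event expression (similar to systemd calendar
      # events). Assuming that syntax, a daily run at 03:00 instead of the weekly
      # default might be written as:
      #
      #   services.docspell-joex.house-keeping.schedule = "*-*-* 03:00:00";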

      extraction = mkOption {
        type = types.submodule({
          options = {
            pdf = mkOption {
              type = types.submodule({
                options = {
                  min-text-len = mkOption {
                    type = types.int;
                    default = defaults.extraction.pdf.min-text-len;
                    description = ''
                      For PDF files it is first tried to read the text parts of the
                      PDF. But PDFs can be complex documents and they may contain text
                      and images. If the returned text is shorter than the value
                      below, OCR is run afterwards. Then both extracted texts are
                      compared and the longer will be used.
                    '';
                  };
                };
              });
              default = defaults.extraction.pdf;
              description = "Settings for PDF extraction";
            };
            preview = mkOption {
              type = types.submodule({
                options = {
                  dpi = mkOption {
                    type = types.int;
                    default = defaults.extraction.preview.dpi;
                    description = ''
                      When rendering a pdf page, use this dpi. This results in
                      scaling the image. A standard A4 page rendered at 96dpi
                      results in a roughly 790x1100px image. Using 32 results in a
                      roughly 200x300px image.

                      Note, when this is changed, you might want to re-generate
                      preview images. Check the api for this, there is an endpoint
                      to regenerate all for a collective.
                    '';
                  };
                };
              });
              default = defaults.extraction.preview;
              description = "Settings for rendering preview images.";
            };
            ocr = mkOption {
              type = types.submodule({
                options = {
                  max-image-size = mkOption {
                    type = types.int;
                    default = defaults.extraction.ocr.max-image-size;
                    description = ''
                      Images greater than this size are skipped. Note that every
                      image is loaded completely into memory for doing OCR.
                    '';
                  };
                  page-range = mkOption {
                    type = types.submodule({
                      options = {
                        begin = mkOption {
                          type = types.int;
                          default = defaults.extraction.ocr.page-range.begin;
                          description = "Specifies the first N pages of a file to process.";
                        };
                      };
                    });
                    default = defaults.extraction.ocr.page-range;
                    description = ''
                      Defines what pages to process. If a PDF with 600 pages is
                      submitted, it is probably not necessary to scan through all of
                      them. This would take a long time and occupy resources for no
                      value. The first few pages should suffice. The default is the
                      first 10 pages.

                      If you want all pages to be processed, set this number to -1.

                      Note: if you change the ghostscript command below, be aware that
                      this setting (if not -1) will add another parameter to the
                      beginning of the command.
                    '';
                  };
                  ghostscript = mkOption {
                    type = types.submodule({
                      options = {
                        working-dir = mkOption {
                          type = types.str;
                          default = defaults.extraction.ocr.ghostscript.working-dir;
                          description = "Directory where the extraction processes can put their temp files";
                        };
                        command = mkOption {
                          type = types.submodule({
                            options = {
                              program = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.ghostscript.command.program;
                                description = "The path to the executable.";
                              };
                              args = mkOption {
                                type = types.listOf types.str;
                                default = defaults.extraction.ocr.ghostscript.command.args;
                                description = "The arguments to the program";
                              };
                              timeout = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.ghostscript.command.timeout;
                                description = "The timeout when executing the command";
                              };
                            };
                          });
                          default = defaults.extraction.ocr.ghostscript.command;
                          description = "The system command";
                        };
                      };
                    });
                    default = defaults.extraction.ocr.ghostscript;
                    description = "The ghostscript command.";
                  };
                  unpaper = mkOption {
                    type = types.submodule({
                      options = {
                        command = mkOption {
                          type = types.submodule({
                            options = {
                              program = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.unpaper.command.program;
                                description = "The path to the executable.";
                              };
                              args = mkOption {
                                type = types.listOf types.str;
                                default = defaults.extraction.ocr.unpaper.command.args;
                                description = "The arguments to the program";
                              };
                              timeout = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.unpaper.command.timeout;
                                description = "The timeout when executing the command";
                              };
                            };
                          });
                          default = defaults.extraction.ocr.unpaper.command;
                          description = "The system command";
                        };
                      };
                    });
                    default = defaults.extraction.ocr.unpaper;
                    description = "The unpaper command.";
                  };
                  tesseract = mkOption {
                    type = types.submodule({
                      options = {
                        command = mkOption {
                          type = types.submodule({
                            options = {
                              program = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.tesseract.command.program;
                                description = "The path to the executable.";
                              };
                              args = mkOption {
                                type = types.listOf types.str;
                                default = defaults.extraction.ocr.tesseract.command.args;
                                description = "The arguments to the program";
                              };
                              timeout = mkOption {
                                type = types.str;
                                default = defaults.extraction.ocr.tesseract.command.timeout;
                                description = "The timeout when executing the command";
                              };
                            };
                          });
                          default = defaults.extraction.ocr.tesseract.command;
                          description = "The system command";
                        };
                      };
                    });
                    default = defaults.extraction.ocr.tesseract;
                    description = "The tesseract command.";
                  };
                };
              });
              default = defaults.extraction.ocr;
              description = "Settings for doing OCR on images and pdf files.";
            };
          };
        });
        default = defaults.extraction;
        description = ''
          Configuration of text extraction

          Extracting text currently only works for image and pdf files. It
          will first run ghostscript to create a gray image from a
          pdf. Then unpaper is run to optimize the image for the upcoming
          ocr, which will be done by tesseract. All these programs must be
          available in your PATH or the absolute path can be specified
          below.
        '';
      };
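
      # A rough sketch of what one OCR pass does with the defaults above. The
      # {{infile}}/{{outfile}}/{{lang}} placeholders are substituted by joex at
      # runtime; the commands shown are just the configured args spelled out:
      #
      #   gs -dNOPAUSE -dBATCH -dSAFER -sDEVICE=tiffscaled8 -sOutputFile=<outfile> <infile>
      #   unpaper <infile> <outfile>
      #   tesseract <file> stdout -l <lang>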

      text-analysis = mkOption {
        type = types.submodule({
          options = {
            max-length = mkOption {
              type = types.int;
              default = defaults.text-analysis.max-length;
              description = ''
                Maximum length of text to be analysed.

                All text to analyse must fit into RAM. A large document may take
                too much heap. Also, the most important information is at the
                beginning of a document, so in most cases the first two pages
                should suffice. Default is 10000, which is about 2-3 pages
                (a rough guess).
              '';
            };
            working-dir = mkOption {
              type = types.str;
              default = defaults.text-analysis.working-dir;
              description = ''
                A working directory for the analyser to store temporary/working
                files.
              '';
            };

            nlp = mkOption {
              type = types.submodule({
                options = {
                  mode = mkOption {
                    type = types.str;
                    default = defaults.text-analysis.nlp.mode;
                    description = ''
                      The mode for configuring NLP models:

                      1. full - builds the complete pipeline
                      2. basic - builds only the ner annotator
                      3. regexonly - matches each entry in your address book via regexps
                      4. disabled - doesn't use any stanford-nlp feature

                      The full and basic variants rely on pre-built language models
                      that are available for only 3 languages at the moment: German,
                      English and French.

                      Memory usage varies greatly among the languages. German has
                      quite large models that require about 1G heap. So joex should
                      run with -Xmx1500M at least when using mode=full.

                      The basic variant does a quite good job for German and
                      English. It might be worse for French, always depending on the
                      type of text that is analysed. Joex should run with about 600M
                      heap, here again the German language uses the most.

                      The regexonly variant doesn't depend on a language. It roughly
                      works by converting all entries in your addressbook into
                      regexps and matches each one against the text. This can get
                      memory intensive, too, when the addressbook grows large. This
                      is included in the full and basic variants by default, but can
                      be used independently by setting mode=regexonly.

                      When mode=disabled, the whole nlp pipeline is disabled and
                      you won't get any suggestions. Only what the classifier
                      returns (if enabled).
                    '';
                  };

                  clear-interval = mkOption {
                    type = types.str;
                    default = defaults.text-analysis.nlp.clear-interval;
                    description = ''
                      Idle time after which the NLP caches are cleared to free
                      memory. If <= 0 clearing the cache is disabled.
                    '';
                  };

                  regex-ner = mkOption {
                    type = types.submodule({
                      options = {
                        max-entries = mkOption {
                          type = types.int;
                          default = defaults.text-analysis.nlp.regex-ner.max-entries;
                          description = ''
                            Whether to enable custom NER annotation. This uses the
                            address book of a collective as input for NER tagging (to
                            automatically find correspondent and concerned entities). If
                            the address book is large, this can be quite memory
                            intensive and also makes text analysis much slower. But it
                            improves accuracy and can be used independently of the
                            language. If this is set to 0, it is effectively disabled
                            and NER tagging uses only statistical models (that also work
                            quite well, but are restricted to the languages mentioned
                            above).

                            Note, this is only relevant if nlp.mode is not
                            "disabled".
                          '';
                        };
                        file-cache-time = mkOption {
                          type = types.str;
                          default = defaults.text-analysis.nlp.regex-ner.file-cache-time;
                          description = ''
                            The NER annotation uses a file of patterns that is derived from
                            a collective's address book. This is how long this
                            file will be kept until a check for a state change is done.
                          '';
                        };
                      };
                    });
                    default = defaults.text-analysis.nlp.regex-ner;
                    description = "Settings for regex-based NER annotation using the address book.";
                  };
                };
              });
              default = defaults.text-analysis.nlp;
              description = "Configure NLP";
            };
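
            # One illustrative pairing of mode and heap, following the memory
            # note above (the exact -Xmx value is an assumption; tune it to your
            # languages and data):
            #
            #   services.docspell-joex.text-analysis.nlp.mode = "full";
            #   services.docspell-joex.jvmArgs = [ "-J-Xmx1500M" ];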

            classification = mkOption {
              type = types.submodule({
                options = {
                  enabled = mkOption {
                    type = types.bool;
                    default = defaults.text-analysis.classification.enabled;
                    description = ''
                      Whether to enable classification globally. Each collective can
                      decide to disable it. If it is disabled here, no collective
                      can use classification.
                    '';
                  };
                  item-count = mkOption {
                    type = types.int;
                    default = defaults.text-analysis.classification.item-count;
                    description = ''
                      If concerned with memory consumption, this restricts the
                      number of items to consider. More are better for training. A
                      negative value or zero means to train on all items.
                    '';
                  };
                  classifiers = mkOption {
                    type = types.listOf types.attrs;
                    default = defaults.text-analysis.classification.classifiers;
                    description = ''
                      These settings are used to configure the classifier. If
                      multiple are given, they are all tried and the "best" is
                      chosen at the end. See
                      https://nlp.stanford.edu/nlp/javadoc/javanlp/edu/stanford/nlp/classify/ColumnDataClassifier.html
                      for more info about these settings. The settings here yielded
                      good results with *my* dataset.
                    '';
                  };
                };
              });
              default = defaults.text-analysis.classification;
              description = ''
                Settings for doing document classification.

                This works by learning from existing documents. A collective can
                specify a tag category and the system will try to predict a tag
                from this category for new incoming documents.

                This requires a statistical model that is computed from all
                existing documents. This process is run periodically as
                configured by the collective. It may require a lot of memory,
                depending on the amount of data.

                It utilises this NLP library: https://nlp.stanford.edu/.
              '';
            };
          };
        });
        default = defaults.text-analysis;
        description = "Settings for text analysis";
      };

      processing = mkOption {
        type = types.submodule({
          options = {
            max-due-date-years = mkOption {
              type = types.int;
              default = defaults.processing.max-due-date-years;
              description = ''
                Restricts proposals for due dates. Only dates earlier than this
                number of years in the future are considered.
              '';
            };
          };
        });
        default = defaults.processing;
        description = "General config for processing documents";
      };

      convert = mkOption {
        type = types.submodule({
          options = {
            chunk-size = mkOption {
              type = types.int;
              default = defaults.convert.chunk-size;
              description = ''
                The chunk size used when storing files. This should be the same
                as used with the rest server.
              '';
            };
            converted-filename-part = mkOption {
              type = types.str;
              default = defaults.convert.converted-filename-part;
              description = ''
                A string used to change the filename of the converted pdf file.
                If empty, the original file name is used for the pdf file (the
                extension is always replaced with `pdf`).
              '';
            };
            max-image-size = mkOption {
              type = types.int;
              default = defaults.convert.max-image-size;
              description = ''
                When reading images, this is the maximum size. Images that are
                larger are not processed.
              '';
            };
            markdown = mkOption {
              type = types.submodule({
                options = {
                  internal-css = mkOption {
                    type = types.str;
                    default = defaults.convert.markdown.internal-css;
                    description = ''
                      The CSS that is used to style the resulting HTML.
                    '';
                  };
                };
              });
              default = defaults.convert.markdown;
              description = ''
                Settings when processing markdown files (and other text files)
                to HTML.

                In order to support text formats, text files are first converted
                to HTML using a markdown processor. The resulting HTML is then
                converted to a PDF file.
              '';
            };
            wkhtmlpdf = mkOption {
              type = types.submodule({
                options = {
                  working-dir = mkOption {
                    type = types.str;
                    default = defaults.convert.wkhtmlpdf.working-dir;
                    description = "Directory where the conversion processes can put their temp files";
                  };
                  command = mkOption {
                    type = types.submodule({
                      options = {
                        program = mkOption {
                          type = types.str;
                          default = defaults.convert.wkhtmlpdf.command.program;
                          description = "The path to the executable.";
                        };
                        args = mkOption {
                          type = types.listOf types.str;
                          default = defaults.convert.wkhtmlpdf.command.args;
                          description = "The arguments to the program";
                        };
                        timeout = mkOption {
                          type = types.str;
                          default = defaults.convert.wkhtmlpdf.command.timeout;
                          description = "The timeout when executing the command";
                        };
                      };
                    });
                    default = defaults.convert.wkhtmlpdf.command;
                    description = "The system command";
                  };
                };
              });
              default = defaults.convert.wkhtmlpdf;
              description = ''
                To convert HTML files into PDF files, the external tool
                wkhtmltopdf is used.
              '';
            };
            tesseract = mkOption {
              type = types.submodule({
                options = {
                  working-dir = mkOption {
                    type = types.str;
                    default = defaults.convert.tesseract.working-dir;
                    description = "Directory where the conversion processes can put their temp files";
                  };
                  command = mkOption {
                    type = types.submodule({
                      options = {
                        program = mkOption {
                          type = types.str;
                          default = defaults.convert.tesseract.command.program;
                          description = "The path to the executable.";
                        };
                        args = mkOption {
                          type = types.listOf types.str;
                          default = defaults.convert.tesseract.command.args;
                          description = "The arguments to the program";
                        };
                        timeout = mkOption {
                          type = types.str;
                          default = defaults.convert.tesseract.command.timeout;
                          description = "The timeout when executing the command";
                        };
                      };
                    });
                    default = defaults.convert.tesseract.command;
                    description = "The system command";
                  };
                };
              });
              default = defaults.convert.tesseract;
              description = ''
                To convert image files to PDF files, tesseract is used. This
                also extracts the text in one go.
              '';
            };
            unoconv = mkOption {
              type = types.submodule({
                options = {
                  working-dir = mkOption {
                    type = types.str;
                    default = defaults.convert.unoconv.working-dir;
                    description = "Directory where the conversion processes can put their temp files";
                  };
                  command = mkOption {
                    type = types.submodule({
                      options = {
                        program = mkOption {
                          type = types.str;
                          default = defaults.convert.unoconv.command.program;
                          description = "The path to the executable.";
                        };
                        args = mkOption {
                          type = types.listOf types.str;
                          default = defaults.convert.unoconv.command.args;
                          description = "The arguments to the program";
                        };
                        timeout = mkOption {
                          type = types.str;
                          default = defaults.convert.unoconv.command.timeout;
                          description = "The timeout when executing the command";
                        };
                      };
                    });
                    default = defaults.convert.unoconv.command;
                    description = "The system command";
                  };
                };
              });
              default = defaults.convert.unoconv;
              description = ''
                To convert "office" files to PDF files, the external tool
                unoconv is used. Unoconv uses libreoffice/openoffice for
                converting. So it supports all formats that can be read
                with libreoffice/openoffice.

                Note: to greatly improve performance, it is recommended to start
                a libreoffice listener by running `unoconv -l` in a separate
                process.
              '';
            };
            ocrmypdf = mkOption {
              type = types.submodule({
                options = {
                  enabled = mkOption {
                    type = types.bool;
                    default = defaults.convert.ocrmypdf.enabled;
                    description = "Whether to use ocrmypdf to convert pdf to pdf/a.";
                  };
                  working-dir = mkOption {
                    type = types.str;
                    default = defaults.convert.ocrmypdf.working-dir;
                    description = "Directory where the conversion processes can put their temp files";
                  };
                  command = mkOption {
                    type = types.submodule({
                      options = {
                        program = mkOption {
                          type = types.str;
                          default = defaults.convert.ocrmypdf.command.program;
                          description = "The path to the executable.";
                        };
                        args = mkOption {
                          type = types.listOf types.str;
                          default = defaults.convert.ocrmypdf.command.args;
                          description = "The arguments to the program";
                        };
                        timeout = mkOption {
                          type = types.str;
                          default = defaults.convert.ocrmypdf.command.timeout;
                          description = "The timeout when executing the command";
                        };
                      };
                    });
                    default = defaults.convert.ocrmypdf.command;
                    description = "The system command";
                  };
                };
              });
              default = defaults.convert.ocrmypdf;
              description = ''
                The tool ocrmypdf can be used to convert pdf files to pdf files
                in order to add extracted text as a separate layer. This makes
                image-only pdfs searchable and you can select and copy/paste the
                text. It also converts pdfs into pdf/a type pdfs, which are best
                suited for archiving. So it makes sense to use this even for
                text-only pdfs.

                It is recommended to install ocrmypdf, but it is optional.
                If it is enabled but fails, the error is not fatal and the
                processing will continue using the original pdf for extracting
                text. You can also disable it to remove the errors from the
                processing logs.

                The `--skip-text` option is necessary to not fail on "text" pdfs
                (where ocr is not necessary). In this case, the pdf will be
                converted to PDF/A.
              '';
            };
          };
        });
        default = defaults.convert;
        description = ''
          Configuration for converting files into PDFs.

          Most of it is delegated to external tools, which can be configured
          below. They must be in the PATH environment or specify the full
          path below via the `program` key.
        '';
      };
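
      # Illustrative tweaks: the optional ocrmypdf step can be switched off, and
      # any of the tool commands can be adjusted, e.g. a longer wkhtmltopdf timeout:
      #
      #   services.docspell-joex.convert.ocrmypdf.enabled = false;
      #   services.docspell-joex.convert.wkhtmlpdf.command.timeout = "5 minutes";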

      files = mkOption {
        type = types.submodule({
          options = {
            chunk-size = mkOption {
              type = types.int;
              default = defaults.files.chunk-size;
              description = ''
                Defines the chunk size (in bytes) used to store the files.
                This will affect the memory footprint when uploading and
                downloading files. At most this amount is loaded into RAM for
                down- and uploading.

                It also defines the chunk size used for the blobs inside the
                database.
              '';
            };
            valid-mime-types = mkOption {
              type = types.listOf types.str;
              default = defaults.files.valid-mime-types;
              description = ''
                The file content types that are considered valid. Docspell
                will only pass these files to processing. The processing code
                itself also checks which files are supported and which are
                not. This affects the uploading part and is a first check to
                avoid 'bad' files getting into the system.
              '';
            };
          };
        });
        default = defaults.files;
        description = "Settings for how files are stored.";
      };

      full-text-search = mkOption {
        type = types.submodule({
          options = {
            enabled = mkOption {
              type = types.bool;
              default = defaults.full-text-search.enabled;
              description = ''
                The full-text search feature can be disabled. It requires an
                additional index server which needs additional memory and disk
                space. It can be enabled at any time later.

                Currently the SOLR search platform is supported.
              '';
            };
            solr = mkOption {
              type = types.submodule({
                options = {
                  url = mkOption {
                    type = types.str;
                    default = defaults.full-text-search.solr.url;
                    description = "The URL to solr";
                  };
                  commit-within = mkOption {
                    type = types.int;
                    default = defaults.full-text-search.solr.commit-within;
                    description = "Used to tell solr when to commit the data";
                  };
                  log-verbose = mkOption {
                    type = types.bool;
                    default = defaults.full-text-search.solr.log-verbose;
                    description = "If true, logs request and response bodies";
                  };
                  def-type = mkOption {
                    type = types.str;
                    default = defaults.full-text-search.solr.def-type;
                    description = ''
                      The defType parameter to lucene that defines the parser to
                      use. You might want to try "edismax" or look here:
                      https://lucene.apache.org/solr/guide/8_4/query-syntax-and-parsing.html#query-syntax-and-parsing
                    '';
                  };
                  q-op = mkOption {
                    type = types.str;
                    default = defaults.full-text-search.solr.q-op;
                    description = "The default combiner for tokens. One of {AND, OR}.";
                  };
                };
              });
              default = defaults.full-text-search.solr;
              description = "Configuration for the SOLR backend.";
            };
            migration = mkOption {
              type = types.submodule({
                options = {
                  index-all-chunk = mkOption {
                    type = types.int;
                    default = defaults.full-text-search.migration.index-all-chunk;
                    description = ''
                      Chunk size to use when indexing data from the database. This
                      many attachments are loaded into memory and pushed to the
                      full-text index.
                    '';
                  };
                };
              });
              default = defaults.full-text-search.migration;
              description = "Settings for running the index migration tasks";
            };
          };
        });
        default = defaults.full-text-search;
        description = "Configuration for full-text search.";
      };
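
      # A sketch for turning on full-text search against a local Solr instance;
      # the "solr.service" unit name is an assumption about your setup:
      #
      #   services.docspell-joex.full-text-search.enabled = true;
      #   services.docspell-joex.full-text-search.solr.url = "http://localhost:8983/solr/docspell";
      #   services.docspell-joex.waitForTarget = "solr.service";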
    };
  };

  ## implementation
  config = mkIf config.services.docspell-joex.enable {

    users.users."${user}" = mkIf (cfg.runAs == null) {
      name = user;
      isSystemUser = false;
      createHome = true;
      home = "/var/docspell";
      description = "Docspell user";
    };

    # Setting up a unoconv listener to improve conversion performance
    systemd.services.unoconv =
      let
        cmd = "${pkgs.unoconv}/bin/unoconv --listener -v";
      in
      {
        description = "Unoconv Listener";
        after = [ "networking.target" ];
        wantedBy = [ "multi-user.target" ];
        serviceConfig = {
          Restart = "always";
        };
        script =
          "${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${user} -c \"${cmd}\"";
      };

    systemd.services.docspell-joex =
      let
        args = builtins.concatStringsSep " " cfg.jvmArgs;
        cmd = "${pkgs.docspell.joex}/bin/docspell-joex ${args} -- ${configFile}";
        waitTarget =
          if cfg.waitForTarget != null
          then
            [ cfg.waitForTarget ]
          else
            [];
      in
      {
        description = "Docspell Joex";
        after = ([ "networking.target" ] ++ waitTarget);
        wantedBy = [ "multi-user.target" ];
        path = [ pkgs.gawk ];

        script =
          "${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${user} -c \"${cmd}\"";
      };
  };
}