diff --git a/build.sbt b/build.sbt index 6e5bb349..f590f452 100644 --- a/build.sbt +++ b/build.sbt @@ -19,14 +19,14 @@ val scalafixSettings = Seq( ) val sharedSettings = Seq( - organization := "com.github.eikek", - scalaVersion := "2.13.6", + organization := "com.github.eikek", + scalaVersion := "2.13.6", organizationName := "Docspell Contributors", licenses += ("GPL-3.0-or-later", url( "https://spdx.org/licenses/GPL-3.0-or-later.html" )), - startYear := Some(2020), - headerLicenseStyle := HeaderLicenseStyle.SpdxSyntax, + startYear := Some(2020), + headerLicenseStyle := HeaderLicenseStyle.SpdxSyntax, headerSources / excludeFilter := HiddenFileFilter || "*.java" || "StringUtil.scala", scalacOptions ++= Seq( "-deprecation", @@ -62,8 +62,8 @@ val testSettingsMUnit = Seq( ) lazy val noPublish = Seq( - publish := {}, - publishLocal := {}, + publish := {}, + publishLocal := {}, publishArtifact := false ) @@ -157,10 +157,10 @@ val buildInfoSettings = Seq( val openapiScalaSettings = Seq( openapiScalaConfig := ScalaConfig() .withJson(ScalaJson.circeSemiauto) - .addMapping(CustomMapping.forType({ case TypeDef("LocalDateTime", _) => + .addMapping(CustomMapping.forType { case TypeDef("LocalDateTime", _) => TypeDef("Timestamp", Imports("docspell.common.Timestamp")) - })) - .addMapping(CustomMapping.forFormatType({ + }) + .addMapping(CustomMapping.forFormatType { case "ident" => field => field.copy(typeDef = TypeDef("Ident", Imports("docspell.common.Ident"))) case "accountid" => @@ -246,7 +246,7 @@ val openapiScalaSettings = Seq( field => field .copy(typeDef = TypeDef("Duration", Imports("docspell.common.Duration"))) - })) + }) ) // --- Modules @@ -287,7 +287,7 @@ val files = project val files = (base ** (_.isFile)).pair(sbt.io.Path.relativeTo(base)) val lines = files.toList.map(_._2).map { s => val ident = s.replaceAll("[^a-zA-Z0-9_]+", "_") - ident -> s"""val $ident = createUrl("${s}")""" + ident -> s"""val $ident = createUrl("$s")""" } val content = s"""package docspell.files 
@@ -301,7 +301,7 @@ ${lines.map(_._1).mkString(",\n")} } """ - val target = (Test / sourceManaged).value / "scala" / "ExampleFiles.scala" + val target = (Test / sourceManaged).value / "scala" / "ExampleFiles.scala" IO.createDirectory(target.getParentFile) IO.write(target, content) Seq(target) @@ -436,9 +436,9 @@ val restapi = project libraryDependencies ++= Dependencies.circe, openapiTargetLanguage := Language.Scala, - openapiPackage := Pkg("docspell.restapi.model"), - openapiSpec := (Compile / resourceDirectory).value / "docspell-openapi.yml", - openapiStaticGen := OpenApiDocGenerator.Redoc + openapiPackage := Pkg("docspell.restapi.model"), + openapiSpec := (Compile / resourceDirectory).value / "docspell-openapi.yml", + openapiStaticGen := OpenApiDocGenerator.Redoc ) .dependsOn(common) @@ -456,9 +456,9 @@ val joexapi = project Dependencies.http4sCirce ++ Dependencies.http4sClient, openapiTargetLanguage := Language.Scala, - openapiPackage := Pkg("docspell.joexapi.model"), - openapiSpec := (Compile / resourceDirectory).value / "joex-openapi.yml", - openapiStaticGen := OpenApiDocGenerator.Redoc + openapiPackage := Pkg("docspell.joexapi.model"), + openapiSpec := (Compile / resourceDirectory).value / "joex-openapi.yml", + openapiStaticGen := OpenApiDocGenerator.Redoc ) .dependsOn(common) @@ -487,9 +487,9 @@ val webapp = project .settings(stylesSettings) .settings(webjarSettings(query.js)) .settings( - name := "docspell-webapp", + name := "docspell-webapp", openapiTargetLanguage := Language.Elm, - openapiPackage := Pkg("Api.Model"), + openapiPackage := Pkg("Api.Model"), openapiSpec := (restapi / Compile / resourceDirectory).value / "docspell-openapi.yml", openapiElmConfig := ElmConfig().withJson(ElmJson.decodePipeline) ) @@ -507,7 +507,7 @@ val joex = project .settings( name := "docspell-joex", description := "The joex component (job executor) for docspell which executes long-running tasks.", - packageSummary := "Docspell Joex", + packageSummary := "Docspell Joex", 
packageDescription := description.value, libraryDependencies ++= Dependencies.fs2 ++ @@ -544,9 +544,9 @@ val restserver = project .settings(debianSettings("docspell-server")) .settings(buildInfoSettings) .settings( - name := "docspell-restserver", - description := "Docspell server providing the user interface and a REST Api.", - packageSummary := "Docspell Rest server", + name := "docspell-restserver", + description := "Docspell server providing the user interface and a REST Api.", + packageSummary := "Docspell Rest server", packageDescription := description.value, libraryDependencies ++= Dependencies.http4sServer ++ @@ -601,12 +601,12 @@ val website = project .enablePlugins(ZolaPlugin, GitHubPagesPlugin) .settings(sharedSettings) .settings( - name := "docspell-website", - publishArtifact := false, - publish / skip := true, - gitHubPagesOrgName := "eikek", + name := "docspell-website", + publishArtifact := false, + publish / skip := true, + gitHubPagesOrgName := "eikek", gitHubPagesRepoName := "docspell", - gitHubPagesSiteDir := zolaOutputDir.value, + gitHubPagesSiteDir := zolaOutputDir.value, Compile / resourceGenerators += Def.task { val templateOut = baseDirectory.value / "site" / "templates" / "shortcodes" val staticOut = baseDirectory.value / "site" / "static" / "openapi" @@ -614,11 +614,11 @@ val website = project val logger = streams.value.log val files = Seq( - (restserver / Compile / resourceDirectory).value / "reference.conf" -> templateOut / "server.conf", - (joex / Compile / resourceDirectory).value / "reference.conf" -> templateOut / "joex.conf", + (restserver / Compile / resourceDirectory).value / "reference.conf" -> templateOut / "server.conf", + (joex / Compile / resourceDirectory).value / "reference.conf" -> templateOut / "joex.conf", (LocalRootProject / baseDirectory).value / "tools" / "exim" / "exim.conf" -> templateOut / "sample-exim.conf", - (restapi / Compile / resourceDirectory).value / "docspell-openapi.yml" -> staticOut / 
"docspell-openapi.yml", - (restapi / Compile / openapiStaticDoc).value -> staticOut / "docspell-openapi.html" + (restapi / Compile / resourceDirectory).value / "docspell-openapi.yml" -> staticOut / "docspell-openapi.yml", + (restapi / Compile / openapiStaticDoc).value -> staticOut / "docspell-openapi.html" ) IO.copy(files) files.map(_._2) @@ -759,7 +759,7 @@ def packageTools(logger: Logger, dir: File, version: String): Seq[File] = { val target = dir / "target" IO.delete(target) IO.createDirectory(target) - val archive = target / s"docspell-tools-${version}.zip" + val archive = target / s"docspell-tools-$version.zip" logger.info(s"Packaging tools to $archive ...") val webext = target / "docspell-firefox-extension.xpi" val wx = dir / "webextension" @@ -782,13 +782,13 @@ def packageTools(logger: Logger, dir: File, version: String): Seq[File] = { (dir ** "*") .filter(f => !excludes.exists(p => f.absolutePath.startsWith(p.absolutePath))) .pair(sbt.io.Path.relativeTo(dir)) - .map({ case (f, name) => (f, s"docspell-tools-${version}/$name") }) + .map { case (f, name) => (f, s"docspell-tools-$version/$name") } IO.zip( Seq( - webext -> s"docspell-tools-${version}/firefox/docspell-extension.xpi", - wx / "native/app_manifest.json" -> s"docspell-tools-${version}/firefox/native/app_manifest.json", - wx / "native/native.py" -> s"docspell-tools-${version}/firefox/native/native.py" + webext -> s"docspell-tools-$version/firefox/docspell-extension.xpi", + wx / "native/app_manifest.json" -> s"docspell-tools-$version/firefox/native/app_manifest.json", + wx / "native/native.py" -> s"docspell-tools-$version/firefox/native/native.py" ) ++ files, archive, None diff --git a/modules/analysis/src/main/scala/docspell/analysis/classifier/StanfordTextClassifier.scala b/modules/analysis/src/main/scala/docspell/analysis/classifier/StanfordTextClassifier.scala index 3c91fefd..810bad77 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/classifier/StanfordTextClassifier.scala +++ 
b/modules/analysis/src/main/scala/docspell/analysis/classifier/StanfordTextClassifier.scala @@ -155,10 +155,8 @@ final class StanfordTextClassifier[F[_]: Async](cfg: TextClassifierConfig) case class TrainResult(score: Double, model: ClassifierModel) def prepend(pre: String, data: Map[String, String]): Map[String, String] = - data.toList - .map({ case (k, v) => - if (k.startsWith(pre)) (k, v) - else (pre + k, v) - }) - .toMap + data.toList.map { case (k, v) => + if (k.startsWith(pre)) (k, v) + else (pre + k, v) + }.toMap } diff --git a/modules/analysis/src/main/scala/docspell/analysis/contact/Domain.scala b/modules/analysis/src/main/scala/docspell/analysis/contact/Domain.scala index ae4eaa0b..c53dd789 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/contact/Domain.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/contact/Domain.scala @@ -32,7 +32,7 @@ object Domain { Tld .findTld(str) .map(tld => (str.dropRight(tld.length), tld)) - .map({ case (names, tld) => + .map { case (names, tld) => names.split('.').toList match { case Nil => Left(s"Not a domain: $str") case segs @@ -43,7 +43,7 @@ object Domain { Right(Domain(NonEmptyList.fromListUnsafe(segs), tld)) case _ => Left(s"Not a domain: $str") } - }) + } .getOrElse(Left(s"Not a domain $str")) def isDomain(str: String): Boolean = diff --git a/modules/analysis/src/main/scala/docspell/analysis/date/DateFind.scala b/modules/analysis/src/main/scala/docspell/analysis/date/DateFind.scala index 4d90324e..1257a5c9 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/date/DateFind.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/date/DateFind.scala @@ -160,11 +160,11 @@ object DateFind { Reader(words => Nel.of(reader, more: _*).map(_.read(words)).reduce) def readFirst[A](f: Word => Option[A]): Reader[A] = - Reader({ + Reader { case Nil => Result.Failure case a :: as => f(a).map(value => Result.Success(value, as)).getOrElse(Result.Failure) - }) + } } sealed trait Result[+A] { 
diff --git a/modules/analysis/src/main/scala/docspell/analysis/date/MonthName.scala b/modules/analysis/src/main/scala/docspell/analysis/date/MonthName.scala index 37b16852..63d51a9a 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/date/MonthName.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/date/MonthName.scala @@ -15,7 +15,7 @@ object MonthName { private def merge(n0: List[List[String]], ns: List[List[String]]*): List[List[String]] = ns.foldLeft(n0) { (res, el) => - res.zip(el).map({ case (a, b) => a ++ b }) + res.zip(el).map { case (a, b) => a ++ b } } private def forLang(lang: Language): List[List[String]] = diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/Annotator.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/Annotator.scala index ae5620fc..cb191104 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/Annotator.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/Annotator.scala @@ -35,12 +35,12 @@ object Annotator { * * There are the following ways: * - * - disabled: it returns a no-op annotator that always gives an empty list - * - full: the complete stanford pipeline is used - * - basic: only the ner classifier is used + * - disabled: it returns a no-op annotator that always gives an empty list + * - full: the complete stanford pipeline is used + * - basic: only the ner classifier is used * - * Additionally, if there is a regexNer-file specified, the regexner annotator is - * also run. In case the full pipeline is used, this is already included. + * Additionally, if there is a regexNer-file specified, the regexner annotator is also + * run. In case the full pipeline is used, this is already included. 
*/ def apply[F[_]: Sync](mode: NlpMode)(settings: NlpSettings): Annotator[F] = mode match { diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/BasicCRFAnnotator.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/BasicCRFAnnotator.scala index 28484b74..8026188c 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/BasicCRFAnnotator.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/BasicCRFAnnotator.scala @@ -21,10 +21,9 @@ import edu.stanford.nlp.ie.crf.CRFClassifier import edu.stanford.nlp.ling.{CoreAnnotations, CoreLabel} import org.log4s.getLogger -/** This is only using the CRFClassifier without building an analysis - * pipeline. The ner-classifier cannot use results from POS-tagging - * etc. and is therefore not as good as the [[StanfordNerAnnotator]]. - * But it uses less memory, while still being not bad. +/** This is only using the CRFClassifier without building an analysis pipeline. The + * ner-classifier cannot use results from POS-tagging etc. and is therefore not as good + * as the [[StanfordNerAnnotator]]. But it uses less memory, while still being not bad. */ object BasicCRFAnnotator { private[this] val logger = getLogger diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/PipelineCache.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/PipelineCache.scala index 0b530715..0aa5e26e 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/PipelineCache.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/PipelineCache.scala @@ -17,8 +17,8 @@ import docspell.common._ import org.log4s.getLogger -/** Creating the StanfordCoreNLP pipeline is quite expensive as it - * involves IO and initializing large objects. +/** Creating the StanfordCoreNLP pipeline is quite expensive as it involves IO and + * initializing large objects. * * Therefore, the instances are cached, because they are thread-safe. 
* diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/Properties.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/Properties.scala index 591565d7..25facf0a 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/Properties.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/Properties.scala @@ -44,48 +44,48 @@ object Properties { def nerGerman(regexNerMappingFile: Option[String], highRecall: Boolean): JProps = Properties( - "annotators" -> "tokenize,ssplit,mwt,pos,lemma,ner", - "tokenize.language" -> "de", - "mwt.mappingFile" -> "edu/stanford/nlp/models/mwt/german/german-mwt.tsv", - "pos.model" -> "edu/stanford/nlp/models/pos-tagger/german-ud.tagger", - "ner.statisticalOnly" -> "true", - "ner.rulesOnly" -> "false", - "ner.applyFineGrained" -> "false", + "annotators" -> "tokenize,ssplit,mwt,pos,lemma,ner", + "tokenize.language" -> "de", + "mwt.mappingFile" -> "edu/stanford/nlp/models/mwt/german/german-mwt.tsv", + "pos.model" -> "edu/stanford/nlp/models/pos-tagger/german-ud.tagger", + "ner.statisticalOnly" -> "true", + "ner.rulesOnly" -> "false", + "ner.applyFineGrained" -> "false", "ner.applyNumericClassifiers" -> "false", //only english supported, not needed currently - "ner.useSUTime" -> "false", //only english, unused in docspell - "ner.language" -> "de", - "ner.model" -> "edu/stanford/nlp/models/ner/german.distsim.crf.ser.gz,edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" + "ner.useSUTime" -> "false", //only english, unused in docspell + "ner.language" -> "de", + "ner.model" -> "edu/stanford/nlp/models/ner/german.distsim.crf.ser.gz,edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" ).withRegexNer(regexNerMappingFile).withHighRecall(highRecall) def nerEnglish(regexNerMappingFile: Option[String]): JProps = Properties( - "annotators" -> "tokenize,ssplit,pos,lemma,ner", - "tokenize.language" -> "en", - "pos.model" -> 
"edu/stanford/nlp/models/pos-tagger/english-left3words-distsim.tagger", + "annotators" -> "tokenize,ssplit,pos,lemma,ner", + "tokenize.language" -> "en", + "pos.model" -> "edu/stanford/nlp/models/pos-tagger/english-left3words-distsim.tagger", "ner.statisticalOnly" -> "true", "ner.rulesOnly" -> "false", "ner.applyFineGrained" -> "false", "ner.applyNumericClassifiers" -> "false", "ner.useSUTime" -> "false", "ner.language" -> "en", - "ner.model" -> "edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" + "ner.model" -> "edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" ).withRegexNer(regexNerMappingFile) def nerFrench(regexNerMappingFile: Option[String], highRecall: Boolean): JProps = Properties( - "annotators" -> "tokenize,ssplit,mwt,pos,lemma,ner", - "tokenize.language" -> "fr", - "mwt.mappingFile" -> "edu/stanford/nlp/models/mwt/french/french-mwt.tsv", - "mwt.pos.model" -> "edu/stanford/nlp/models/mwt/french/french-mwt.tagger", - "mwt.statisticalMappingFile" -> "edu/stanford/nlp/models/mwt/french/french-mwt-statistical.tsv", - "pos.model" -> "edu/stanford/nlp/models/pos-tagger/french-ud.tagger", - "ner.statisticalOnly" -> "true", - "ner.rulesOnly" -> "false", - "ner.applyFineGrained" -> "false", + "annotators" -> "tokenize,ssplit,mwt,pos,lemma,ner", + "tokenize.language" -> "fr", + "mwt.mappingFile" -> "edu/stanford/nlp/models/mwt/french/french-mwt.tsv", + "mwt.pos.model" -> "edu/stanford/nlp/models/mwt/french/french-mwt.tagger", + "mwt.statisticalMappingFile" -> "edu/stanford/nlp/models/mwt/french/french-mwt-statistical.tsv", + "pos.model" -> "edu/stanford/nlp/models/pos-tagger/french-ud.tagger", + "ner.statisticalOnly" -> "true", + "ner.rulesOnly" -> "false", + "ner.applyFineGrained" -> "false", "ner.applyNumericClassifiers" -> "false", "ner.useSUTime" -> "false", "ner.language" -> "de", - "ner.model" -> 
"edu/stanford/nlp/models/ner/french-wikiner-4class.crf.ser.gz,edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" + "ner.model" -> "edu/stanford/nlp/models/ner/french-wikiner-4class.crf.ser.gz,edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz" ).withRegexNer(regexNerMappingFile).withHighRecall(highRecall) def regexNerOnly(regexNerMappingFile: Path): JProps = diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerAnnotator.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerAnnotator.scala index e036e910..b30745c6 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerAnnotator.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerAnnotator.scala @@ -22,13 +22,11 @@ object StanfordNerAnnotator { /** Runs named entity recognition on the given `text`. * * This uses the classifier pipeline from stanford-nlp, see - * https://nlp.stanford.edu/software/CRF-NER.html. Creating these - * classifiers is quite expensive, it involves loading large model - * files. The classifiers are thread-safe and so they are cached. - * The `cacheKey` defines the "slot" where classifiers are stored - * and retrieved. If for a given `cacheKey` the `settings` change, - * a new classifier must be created. It will then replace the - * previous one. + * https://nlp.stanford.edu/software/CRF-NER.html. Creating these classifiers is quite + * expensive, it involves loading large model files. The classifiers are thread-safe + * and so they are cached. The `cacheKey` defines the "slot" where classifiers are + * stored and retrieved. If for a given `cacheKey` the `settings` change, a new + * classifier must be created. It will then replace the previous one. 
*/ def nerAnnotate(nerClassifier: StanfordCoreNLP, text: String): Vector[NerLabel] = { val doc = new CoreDocument(text) diff --git a/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerSettings.scala b/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerSettings.scala index 7fe8282f..bc62a0f8 100644 --- a/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerSettings.scala +++ b/modules/analysis/src/main/scala/docspell/analysis/nlp/StanfordNerSettings.scala @@ -17,18 +17,16 @@ object StanfordNerSettings { /** Settings for configuring the stanford NER pipeline. * - * The language is mandatory, only the provided ones are supported. - * The `highRecall` only applies for non-English languages. For - * non-English languages the english classifier is run as second - * classifier and if `highRecall` is true, then it will be used to - * tag untagged tokens. This may lead to a lot of false positives, - * but since English is omnipresent in other languages, too it - * depends on the use case for whether this is useful or not. + * The language is mandatory, only the provided ones are supported. The `highRecall` + * only applies for non-English languages. For non-English languages the english + * classifier is run as second classifier and if `highRecall` is true, then it will be + * used to tag untagged tokens. This may lead to a lot of false positives, but since + * English is omnipresent in other languages, too it depends on the use case for + * whether this is useful or not. * * The `regexNer` allows to specify a text file as described here: - * https://nlp.stanford.edu/software/regexner.html. This will be used - * as a last step to tag untagged tokens using the provided list of - * regexps. + * https://nlp.stanford.edu/software/regexner.html. This will be used as a last step to + * tag untagged tokens using the provided list of regexps. 
*/ case class Full( lang: NLPLanguage, @@ -36,7 +34,8 @@ object StanfordNerSettings { regexNer: Option[Path] ) extends StanfordNerSettings - /** Not all languages are supported with predefined statistical models. This allows to provide regexps only. + /** Not all languages are supported with predefined statistical models. This allows to + * provide regexps only. */ case class RegexOnly(regexNerFile: Path) extends StanfordNerSettings diff --git a/modules/analysis/src/test/scala/docspell/analysis/classifier/StanfordTextClassifierSuite.scala b/modules/analysis/src/test/scala/docspell/analysis/classifier/StanfordTextClassifierSuite.scala index f35338e0..939dceea 100644 --- a/modules/analysis/src/test/scala/docspell/analysis/classifier/StanfordTextClassifierSuite.scala +++ b/modules/analysis/src/test/scala/docspell/analysis/classifier/StanfordTextClassifierSuite.scala @@ -37,9 +37,9 @@ class StanfordTextClassifierSuite extends FunSuite { .repeat .take(10) ) - .flatMap({ case (a, b) => + .flatMap { case (a, b) => Stream.emits(Seq(a, b)) - }) + } .covary[IO] val modelExists = { @@ -52,7 +52,7 @@ class StanfordTextClassifierSuite extends FunSuite { } test("run classifier") { - val cfg = TextClassifierConfig(File.path(Paths.get("target")), NonEmptyList.of(Map())) + val cfg = TextClassifierConfig(File.path(Paths.get("target")), NonEmptyList.of(Map())) val things = File.withTempDir[IO](File.path(Paths.get("target")), "testcls") things diff --git a/modules/backend/src/main/scala/docspell/backend/auth/TokenUtil.scala b/modules/backend/src/main/scala/docspell/backend/auth/TokenUtil.scala index 470798c5..4ced29a7 100644 --- a/modules/backend/src/main/scala/docspell/backend/auth/TokenUtil.scala +++ b/modules/backend/src/main/scala/docspell/backend/auth/TokenUtil.scala @@ -41,6 +41,6 @@ private[auth] object TokenUtil { def constTimeEq(s1: String, s2: String): Boolean = s1.zip(s2) - .foldLeft(true)({ case (r, (c1, c2)) => r & c1 == c2 }) & s1.length == s2.length + .foldLeft(true) { 
case (r, (c1, c2)) => r & c1 == c2 } & s1.length == s2.length } diff --git a/modules/backend/src/main/scala/docspell/backend/fulltext/CreateIndex.scala b/modules/backend/src/main/scala/docspell/backend/fulltext/CreateIndex.scala index e0865cf1..b9edb816 100644 --- a/modules/backend/src/main/scala/docspell/backend/fulltext/CreateIndex.scala +++ b/modules/backend/src/main/scala/docspell/backend/fulltext/CreateIndex.scala @@ -18,8 +18,8 @@ import docspell.store.queries.QItem trait CreateIndex[F[_]] { - /** Low-level function to re-index data. It is not submitted as a job, - * but invoked on the current machine. + /** Low-level function to re-index data. It is not submitted as a job, but invoked on + * the current machine. */ def reIndexData( logger: Logger[F], diff --git a/modules/backend/src/main/scala/docspell/backend/item/Merge.scala b/modules/backend/src/main/scala/docspell/backend/item/Merge.scala index 54a280d7..c66c7c0f 100644 --- a/modules/backend/src/main/scala/docspell/backend/item/Merge.scala +++ b/modules/backend/src/main/scala/docspell/backend/item/Merge.scala @@ -84,9 +84,9 @@ object Merge { nextPos <- store.transact(RAttachment.nextPosition(target)) attachs <- store.transact(items.tail.traverse(id => RAttachment.findByItem(id))) attachFlat = attachs.flatMap(_.toList) - n <- attachFlat.zipWithIndex.traverse({ case (a, idx) => + n <- attachFlat.zipWithIndex.traverse { case (a, idx) => store.transact(RAttachment.updateItemId(a.id, target, nextPos + idx)) - }) + } } yield n.sum } diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OCollective.scala b/modules/backend/src/main/scala/docspell/backend/ops/OCollective.scala index 76be8a0d..057e0dd7 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OCollective.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OCollective.scala @@ -63,8 +63,8 @@ trait OCollective[F[_]] { def startEmptyTrash(args: EmptyTrashArgs): F[Unit] - /** Submits a task that (re)generates the preview 
images for all - * attachments of the given collective. + /** Submits a task that (re)generates the preview images for all attachments of the + * given collective. */ def generatePreviews( storeMode: MakePreviewArgs.StoreMode, @@ -180,7 +180,7 @@ object OCollective { id <- Ident.randomId[F] settings = sett.emptyTrash.getOrElse(EmptyTrash.default) args = EmptyTrashArgs(coll, settings.minAge) - ut = UserTask(id, EmptyTrashArgs.taskName, true, settings.schedule, None, args) + ut = UserTask(id, EmptyTrashArgs.taskName, true, settings.schedule, None, args) _ <- uts.updateOneTask(UserTaskScope(coll), args.makeSubject.some, ut) _ <- joex.notifyAllNodes } yield () diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OFolder.scala b/modules/backend/src/main/scala/docspell/backend/ops/OFolder.scala index ad9e0c74..b7e79766 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OFolder.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OFolder.scala @@ -23,9 +23,8 @@ trait OFolder[F[_]] { def findById(id: Ident, account: AccountId): F[Option[OFolder.FolderDetail]] - /** Adds a new folder. If `login` is non-empty, the `folder.user` - * property is ignored and the user-id is determined by the given - * login name. + /** Adds a new folder. If `login` is non-empty, the `folder.user` property is ignored + * and the user-id is determined by the given login name. 
*/ def add(folder: RFolder, login: Option[Ident]): F[AddResult] diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OFulltext.scala b/modules/backend/src/main/scala/docspell/backend/ops/OFulltext.scala index 6e00666e..c3f48c12 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OFulltext.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OFulltext.scala @@ -49,13 +49,12 @@ trait OFulltext[F[_]] { def findIndexOnlySummary(account: AccountId, fts: OFulltext.FtsInput): F[SearchSummary] def findItemsSummary(q: Query, fts: OFulltext.FtsInput): F[SearchSummary] - /** Clears the full-text index completely and launches a task that - * indexes all data. + /** Clears the full-text index completely and launches a task that indexes all data. */ def reindexAll: F[Unit] - /** Clears the full-text index for the given collective and starts a - * task indexing all their data. + /** Clears the full-text index for the given collective and starts a task indexing all + * their data. 
*/ def reindexCollective(account: AccountId): F[Unit] } @@ -125,7 +124,7 @@ object OFulltext { FtsQuery.HighlightSetting(ftsQ.highlightPre, ftsQ.highlightPost) ) for { - _ <- logger.ftrace(s"Find index only: ${ftsQ.query}/${batch}") + _ <- logger.ftrace(s"Find index only: ${ftsQ.query}/$batch") folders <- store.transact(QFolder.getMemberFolders(account)) ftsR <- fts.search(fq.withFolders(folders)) ftsItems = ftsR.results.groupBy(_.itemId) @@ -154,7 +153,7 @@ object OFulltext { res = itemsWithTags .collect(convertFtsData(ftsR, ftsItems)) - .map({ case (li, fd) => FtsItemWithTags(li, fd) }) + .map { case (li, fd) => FtsItemWithTags(li, fd) } } yield res } @@ -203,7 +202,7 @@ object OFulltext { ) .drop(batch.offset.toLong) .take(batch.limit.toLong) - .map({ case (li, fd) => FtsItem(li, fd) }) + .map { case (li, fd) => FtsItem(li, fd) } .compile .toVector @@ -221,7 +220,7 @@ object OFulltext { ) .drop(batch.offset.toLong) .take(batch.limit.toLong) - .map({ case (li, fd) => FtsItemWithTags(li, fd) }) + .map { case (li, fd) => FtsItemWithTags(li, fd) } .compile .toVector diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OItem.scala b/modules/backend/src/main/scala/docspell/backend/ops/OItem.scala index 4cf31aee..79ce2ee8 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OItem.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OItem.scala @@ -28,9 +28,8 @@ trait OItem[F[_]] { /** Sets the given tags (removing all existing ones). */ def setTags(item: Ident, tagIds: List[String], collective: Ident): F[UpdateResult] - /** Sets tags for multiple items. The tags of the items will be - * replaced with the given ones. Same as `setTags` but for multiple - * items. + /** Sets tags for multiple items. The tags of the items will be replaced with the given + * ones. Same as `setTags` but for multiple items. 
*/ def setTagsMultipleItems( items: NonEmptyList[Ident], @@ -41,8 +40,8 @@ trait OItem[F[_]] { /** Create a new tag and add it to the item. */ def addNewTag(item: Ident, tag: RTag): F[AddResult] - /** Apply all tags to the given item. Tags must exist, but can be IDs - * or names. Existing tags on the item are left unchanged. + /** Apply all tags to the given item. Tags must exist, but can be IDs or names. Existing + * tags on the item are left unchanged. */ def linkTags(item: Ident, tags: List[String], collective: Ident): F[UpdateResult] @@ -163,10 +162,9 @@ trait OItem[F[_]] { collective: Ident ): F[UpdateResult] - /** Submits the item for re-processing. The list of attachment ids can - * be used to only re-process a subset of the item's attachments. - * If this list is empty, all attachments are reprocessed. This - * call only submits the job into the queue. + /** Submits the item for re-processing. The list of attachment ids can be used to only + * re-process a subset of the item's attachments. If this list is empty, all + * attachments are reprocessed. This call only submits the job into the queue. */ def reprocess( item: Ident, @@ -181,9 +179,8 @@ trait OItem[F[_]] { notifyJoex: Boolean ): F[UpdateResult] - /** Submits a task that finds all non-converted pdfs and triggers - * converting them using ocrmypdf. Each file is converted by a - * separate task. + /** Submits a task that finds all non-converted pdfs and triggers converting them using + * ocrmypdf. Each file is converted by a separate task. */ def convertAllPdf( collective: Option[Ident], @@ -191,8 +188,7 @@ trait OItem[F[_]] { notifyJoex: Boolean ): F[UpdateResult] - /** Submits a task that (re)generates the preview image for an - * attachment. + /** Submits a task that (re)generates the preview image for an attachment. 
*/ def generatePreview( args: MakePreviewArgs, @@ -200,8 +196,7 @@ trait OItem[F[_]] { notifyJoex: Boolean ): F[UpdateResult] - /** Submits a task that (re)generates the preview images for all - * attachments. + /** Submits a task that (re)generates the preview images for all attachments. */ def generateAllPreviews( storeMode: MakePreviewArgs.StoreMode, diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OItemSearch.scala b/modules/backend/src/main/scala/docspell/backend/ops/OItemSearch.scala index d7902d36..3c1596df 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OItemSearch.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OItemSearch.scala @@ -183,7 +183,7 @@ object OItemSearch { def findAttachment(id: Ident, collective: Ident): F[Option[AttachmentData[F]]] = store .transact(RAttachment.findByIdAndCollective(id, collective)) - .flatMap({ + .flatMap { case Some(ra) => makeBinaryData(ra.fileId) { m => AttachmentData[F]( @@ -195,7 +195,7 @@ object OItemSearch { case None => (None: Option[AttachmentData[F]]).pure[F] - }) + } def findAttachmentSource( id: Ident, @@ -203,7 +203,7 @@ object OItemSearch { ): F[Option[AttachmentSourceData[F]]] = store .transact(RAttachmentSource.findByIdAndCollective(id, collective)) - .flatMap({ + .flatMap { case Some(ra) => makeBinaryData(ra.fileId) { m => AttachmentSourceData[F]( @@ -215,7 +215,7 @@ object OItemSearch { case None => (None: Option[AttachmentSourceData[F]]).pure[F] - }) + } def findAttachmentPreview( id: Ident, @@ -223,7 +223,7 @@ object OItemSearch { ): F[Option[AttachmentPreviewData[F]]] = store .transact(RAttachmentPreview.findByIdAndCollective(id, collective)) - .flatMap({ + .flatMap { case Some(ra) => makeBinaryData(ra.fileId) { m => AttachmentPreviewData[F]( @@ -235,7 +235,7 @@ object OItemSearch { case None => (None: Option[AttachmentPreviewData[F]]).pure[F] - }) + } def findItemPreview( item: Ident, @@ -243,7 +243,7 @@ object OItemSearch { ): 
F[Option[AttachmentPreviewData[F]]] = store .transact(RAttachmentPreview.findByItemAndCollective(item, collective)) - .flatMap({ + .flatMap { case Some(ra) => makeBinaryData(ra.fileId) { m => AttachmentPreviewData[F]( @@ -255,7 +255,7 @@ object OItemSearch { case None => (None: Option[AttachmentPreviewData[F]]).pure[F] - }) + } def findAttachmentArchive( id: Ident, @@ -263,7 +263,7 @@ object OItemSearch { ): F[Option[AttachmentArchiveData[F]]] = store .transact(RAttachmentArchive.findByIdAndCollective(id, collective)) - .flatMap({ + .flatMap { case Some(ra) => makeBinaryData(ra.fileId) { m => AttachmentArchiveData[F]( @@ -275,7 +275,7 @@ object OItemSearch { case None => (None: Option[AttachmentArchiveData[F]]).pure[F] - }) + } private def makeBinaryData[A](fileId: Ident)(f: FileMeta => A): F[Option[A]] = store.bitpeace diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OOrganization.scala b/modules/backend/src/main/scala/docspell/backend/ops/OOrganization.scala index 64c664f5..4bd077ed 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OOrganization.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OOrganization.scala @@ -64,14 +64,14 @@ object OOrganization { ): F[Vector[OrgAndContacts]] = store .transact(QOrganization.findOrgAndContact(account.collective, query, _.name)) - .map({ case (org, cont) => OrgAndContacts(org, cont) }) + .map { case (org, cont) => OrgAndContacts(org, cont) } .compile .toVector def findOrg(account: AccountId, orgId: Ident): F[Option[OrgAndContacts]] = store .transact(QOrganization.getOrgAndContact(account.collective, orgId)) - .map(_.map({ case (org, cont) => OrgAndContacts(org, cont) })) + .map(_.map { case (org, cont) => OrgAndContacts(org, cont) }) def findAllOrgRefs( account: AccountId, @@ -91,14 +91,14 @@ object OOrganization { ): F[Vector[PersonAndContacts]] = store .transact(QOrganization.findPersonAndContact(account.collective, query, _.name)) - .map({ case (person, org, cont) => 
PersonAndContacts(person, org, cont) }) + .map { case (person, org, cont) => PersonAndContacts(person, org, cont) } .compile .toVector def findPerson(account: AccountId, persId: Ident): F[Option[PersonAndContacts]] = store .transact(QOrganization.getPersonAndContact(account.collective, persId)) - .map(_.map({ case (pers, org, cont) => PersonAndContacts(pers, org, cont) })) + .map(_.map { case (pers, org, cont) => PersonAndContacts(pers, org, cont) }) def findAllPersonRefs( account: AccountId, diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OSimpleSearch.scala b/modules/backend/src/main/scala/docspell/backend/ops/OSimpleSearch.scala index a5fab21e..63c13a7a 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OSimpleSearch.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OSimpleSearch.scala @@ -19,31 +19,27 @@ import docspell.store.queries.SearchSummary import org.log4s.getLogger -/** A "porcelain" api on top of OFulltext and OItemSearch. This takes - * care of restricting the items to a subset, e.g. only items that - * have a "valid" state. +/** A "porcelain" api on top of OFulltext and OItemSearch. This takes care of restricting + * the items to a subset, e.g. only items that have a "valid" state. */ trait OSimpleSearch[F[_]] { - /** Search for items using the given query and optional fulltext - * search. + /** Search for items using the given query and optional fulltext search. * - * When using fulltext search only (the query is empty), only the - * index is searched. It is assumed that the index doesn't contain - * "invalid" items. When using a query, then a condition to select - * only valid items is added to it. + * When using fulltext search only (the query is empty), only the index is searched. It + * is assumed that the index doesn't contain "invalid" items. When using a query, then + * a condition to select only valid items is added to it. 
*/ def search(settings: Settings)(q: Query, fulltextQuery: Option[String]): F[Items] - /** Using the same arguments as in `search`, this returns a summary - * and not the results. + /** Using the same arguments as in `search`, this returns a summary and not the results. */ def searchSummary( settings: StatsSettings )(q: Query, fulltextQuery: Option[String]): F[SearchSummary] - /** Calls `search` by parsing the given query string into a query that - * is then amended wtih the given `fix` query. + /** Calls `search` by parsing the given query string into a query that is then amended + * with the given `fix` query. */ final def searchByString( settings: Settings @@ -52,8 +48,7 @@ trait OSimpleSearch[F[_]] { ): F[StringSearchResult[Items]] = OSimpleSearch.applySearch[F, Items](fix, q)((iq, fts) => search(settings)(iq, fts)) - /** Same as `searchByString` but returning a summary instead of the - * results. + /** Same as `searchByString` but returning a summary instead of the results. */ final def searchSummaryByString( settings: StatsSettings @@ -190,8 +185,8 @@ object OSimpleSearch { } } - /** Calls `run` with one of the success results when extracting the - * fulltext search node from the query. + /** Calls `run` with one of the success results when extracting the fulltext search node + * from the query. */ private def runQuery[F[_]: Applicative, A]( itemQuery: Option[ItemQuery] @@ -211,10 +206,9 @@ object OSimpleSearch { final class Impl[F[_]: Sync](fts: OFulltext[F], is: OItemSearch[F]) extends OSimpleSearch[F] { - /** Implements searching like this: it exploits the fact that teh - * fulltext index only contains valid items. When searching via - * sql the query expression selecting only valid items is added - * here. + /** Implements searching like this: it exploits the fact that the fulltext index only + * contains valid items. When searching via sql the query expression selecting only + * valid items is added here.
*/ def search( settings: Settings diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OUpload.scala b/modules/backend/src/main/scala/docspell/backend/ops/OUpload.scala index cb92f251..2e508a55 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OUpload.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OUpload.scala @@ -31,10 +31,9 @@ trait OUpload[F[_]] { itemId: Option[Ident] ): F[OUpload.UploadResult] - /** Submit files via a given source identifier. The source is looked - * up to identify the collective the files belong to. Metadata - * defined in the source is used as a fallback to those specified - * here (in UploadData). + /** Submit files via a given source identifier. The source is looked up to identify the + * collective the files belong to. Metadata defined in the source is used as a fallback + * to those specified here (in UploadData). */ def submit( data: OUpload.UploadData[F], @@ -103,8 +102,7 @@ object OUpload { def noSource: UploadResult = NoSource - /** When adding files to an item, no item was found using the given - * item-id. + /** When adding files to an item, no item was found using the given item-id. */ case object NoItem extends UploadResult diff --git a/modules/backend/src/main/scala/docspell/backend/ops/OUserTask.scala b/modules/backend/src/main/scala/docspell/backend/ops/OUserTask.scala index c170b97b..b2d78ccf 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/OUserTask.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/OUserTask.scala @@ -37,8 +37,7 @@ trait OUserTask[F[_]] { task: UserTask[ScanMailboxArgs] ): F[Unit] - /** Return the settings for all the notify-due-items task of the - * current user. + /** Return the settings for all the notify-due-items task of the current user. */ def getNotifyDueItems(scope: UserTaskScope): Stream[F, UserTask[NotifyDueItemsArgs]] @@ -59,9 +58,8 @@ trait OUserTask[F[_]] { /** Removes a user task with the given id. 
*/ def deleteTask(scope: UserTaskScope, id: Ident): F[Unit] - /** Discards the schedule and immediately submits the task to the job - * executor's queue. It will not update the corresponding periodic - * task. + /** Discards the schedule and immediately submits the task to the job executor's queue. + * It will not update the corresponding periodic task. */ def executeNow[A](scope: UserTaskScope, subject: Option[String], task: UserTask[A])( implicit E: Encoder[A] diff --git a/modules/backend/src/main/scala/docspell/backend/ops/SendResult.scala b/modules/backend/src/main/scala/docspell/backend/ops/SendResult.scala index 4ada98b5..b175d7bf 100644 --- a/modules/backend/src/main/scala/docspell/backend/ops/SendResult.scala +++ b/modules/backend/src/main/scala/docspell/backend/ops/SendResult.scala @@ -16,8 +16,7 @@ object SendResult { */ case class Success(id: Ident) extends SendResult - /** There was a failure sending the mail. The mail is then not saved - * to db. + /** There was a failure sending the mail. The mail is then not saved to db. */ case class SendFailure(ex: Throwable) extends SendResult @@ -25,8 +24,7 @@ object SendResult { */ case class StoreFailure(ex: Throwable) extends SendResult - /** Something could not be found required for sending (mail configs, - * items etc). + /** Something could not be found required for sending (mail configs, items etc). */ case object NotFound extends SendResult } diff --git a/modules/common/src/main/scala/docspell/common/AllPreviewsArgs.scala b/modules/common/src/main/scala/docspell/common/AllPreviewsArgs.scala index a5815e9e..9103d2c2 100644 --- a/modules/common/src/main/scala/docspell/common/AllPreviewsArgs.scala +++ b/modules/common/src/main/scala/docspell/common/AllPreviewsArgs.scala @@ -9,12 +9,11 @@ package docspell.common import io.circe.generic.semiauto._ import io.circe.{Decoder, Encoder} -/** Arguments for the `AllPreviewsTask` that submits tasks to - * generates a preview image for attachments. 
+/** Arguments for the `AllPreviewsTask` that submits tasks to generate a preview image + * for attachments. * - * It can replace the current preview image or only generate one, if - * it is missing. If no collective is specified, it considers all - * attachments. + * It can replace the current preview image or only generate one, if it is missing. If no + * collective is specified, it considers all attachments. */ case class AllPreviewsArgs( collective: Option[Ident], diff --git a/modules/common/src/main/scala/docspell/common/CollectiveState.scala b/modules/common/src/main/scala/docspell/common/CollectiveState.scala index 1846c0db..f39e50d1 100644 --- a/modules/common/src/main/scala/docspell/common/CollectiveState.scala +++ b/modules/common/src/main/scala/docspell/common/CollectiveState.scala @@ -15,17 +15,15 @@ object CollectiveState { /** A normal active collective */ case object Active extends CollectiveState - /** A collective may be readonly in cases it is implicitly closed - * (e.g. no payment). Users can still see there data and - * download, but have no write access. + /** A collective may be readonly in cases it is implicitly closed (e.g. no payment). + * Users can still see their data and download, but have no write access. */ case object ReadOnly extends CollectiveState /** A collective that has been explicitely closed. */ case object Closed extends CollectiveState - /** A collective blocked by a super user, usually some emergency - * action. + /** A collective blocked by a super user, usually some emergency action.
*/ case object Blocked extends CollectiveState diff --git a/modules/common/src/main/scala/docspell/common/ConvertAllPdfArgs.scala b/modules/common/src/main/scala/docspell/common/ConvertAllPdfArgs.scala index a31beccf..e44d533a 100644 --- a/modules/common/src/main/scala/docspell/common/ConvertAllPdfArgs.scala +++ b/modules/common/src/main/scala/docspell/common/ConvertAllPdfArgs.scala @@ -9,14 +9,12 @@ package docspell.common import io.circe._ import io.circe.generic.semiauto._ -/** Arguments for the task that finds all pdf files that have not been - * converted and submits for each a job that will convert the file - * using ocrmypdf. +/** Arguments for the task that finds all pdf files that have not been converted and + * submits for each a job that will convert the file using ocrmypdf. * - * If the `collective` argument is present, then this task and the - * ones that are submitted by this task run in the realm of the - * collective (and only their files are considered). If it is empty, - * it is a system task and all files are considered. + * If the `collective` argument is present, then this task and the ones that are + * submitted by this task run in the realm of the collective (and only their files are + * considered). If it is empty, it is a system task and all files are considered. */ case class ConvertAllPdfArgs(collective: Option[Ident]) diff --git a/modules/common/src/main/scala/docspell/common/EmptyTrashArgs.scala b/modules/common/src/main/scala/docspell/common/EmptyTrashArgs.scala index 838bf912..8061f531 100644 --- a/modules/common/src/main/scala/docspell/common/EmptyTrashArgs.scala +++ b/modules/common/src/main/scala/docspell/common/EmptyTrashArgs.scala @@ -14,8 +14,8 @@ import io.circe.generic.semiauto._ /** Arguments to the empty-trash task. * - * This task is run periodically to really delete all soft-deleted - * items. These are items with state `ItemState.Deleted`. + * This task is run periodically to really delete all soft-deleted items. 
These are items + * with state `ItemState.Deleted`. */ case class EmptyTrashArgs( collective: Ident, diff --git a/modules/common/src/main/scala/docspell/common/FileName.scala b/modules/common/src/main/scala/docspell/common/FileName.scala index 3649d802..6866f132 100644 --- a/modules/common/src/main/scala/docspell/common/FileName.scala +++ b/modules/common/src/main/scala/docspell/common/FileName.scala @@ -14,8 +14,8 @@ case class FileName private (name: String) { case n => (name.take(n), Some(name.drop(n + 1))) } - /** Returns the name part without the extension. If there is no - * extension, it is the same as fullname. + /** Returns the name part without the extension. If there is no extension, it is the + * same as fullname. */ def baseName: String = base @@ -27,20 +27,20 @@ case class FileName private (name: String) { def fullName: String = name - /** Creates a new name where part is spliced into the name before the - * extension, separated by separator. + /** Creates a new name where part is spliced into the name before the extension, + * separated by separator. */ def withPart(part: String, sep: Char): FileName = if (part.isEmpty()) this else ext - .map(e => new FileName(s"${base}${sep}${part}.${e}")) - .getOrElse(new FileName(s"${base}${sep}${part}")) + .map(e => new FileName(s"$base$sep$part.$e")) + .getOrElse(new FileName(s"$base$sep$part")) /** Create a new name using the given extension. */ def withExtension(newExt: String): FileName = if (newExt.isEmpty()) new FileName(base) - else new FileName(s"${base}.${newExt}") + else new FileName(s"$base.$newExt") } object FileName { diff --git a/modules/common/src/main/scala/docspell/common/Glob.scala b/modules/common/src/main/scala/docspell/common/Glob.scala index 52795bf8..297660b1 100644 --- a/modules/common/src/main/scala/docspell/common/Glob.scala +++ b/modules/common/src/main/scala/docspell/common/Glob.scala @@ -16,14 +16,11 @@ trait Glob { /** Matches the input string against this glob. 
*/ def matches(caseSensitive: Boolean)(in: String): Boolean - /** If this glob consists of multiple segments, it is the same as - * `matches`. If it is only a single segment, it is matched against - * the last segment of the input string that is assumed to be a - * pathname separated by slash. + /** If this glob consists of multiple segments, it is the same as `matches`. If it is + * only a single segment, it is matched against the last segment of the input string + * that is assumed to be a pathname separated by slash. * - * Example: - * test.* <> "/a/b/test.txt" => true - * /test.* <> "/a/b/test.txt" => false + * Example: test.* <> "/a/b/test.txt" => true /test.* <> "/a/b/test.txt" => false */ def matchFilenameOrPath(in: String): Boolean diff --git a/modules/common/src/main/scala/docspell/common/JobState.scala b/modules/common/src/main/scala/docspell/common/JobState.scala index 4eb687a4..4ac493ec 100644 --- a/modules/common/src/main/scala/docspell/common/JobState.scala +++ b/modules/common/src/main/scala/docspell/common/JobState.scala @@ -20,8 +20,7 @@ object JobState { /** Waiting for being executed. */ case object Waiting extends JobState {} - /** A scheduler has picked up this job and will pass it to the next - * free slot. + /** A scheduler has picked up this job and will pass it to the next free slot. */ case object Scheduled extends JobState {} diff --git a/modules/common/src/main/scala/docspell/common/LearnClassifierArgs.scala b/modules/common/src/main/scala/docspell/common/LearnClassifierArgs.scala index 54be5407..2917358b 100644 --- a/modules/common/src/main/scala/docspell/common/LearnClassifierArgs.scala +++ b/modules/common/src/main/scala/docspell/common/LearnClassifierArgs.scala @@ -13,9 +13,9 @@ import io.circe.generic.semiauto._ /** Arguments to the classify-item task. * - * This task is run periodically and learns from existing documents - * to create a model for predicting tags of new documents. 
The user - * must give a tag category as a subset of possible tags.. + * This task is run periodically and learns from existing documents to create a model for + * predicting tags of new documents. The user must give a tag category as a subset of + * possible tags.. */ case class LearnClassifierArgs( collective: Ident diff --git a/modules/common/src/main/scala/docspell/common/MakePageCountArgs.scala b/modules/common/src/main/scala/docspell/common/MakePageCountArgs.scala index 7bb4959f..9e635a00 100644 --- a/modules/common/src/main/scala/docspell/common/MakePageCountArgs.scala +++ b/modules/common/src/main/scala/docspell/common/MakePageCountArgs.scala @@ -9,9 +9,8 @@ package docspell.common import io.circe.generic.semiauto._ import io.circe.{Decoder, Encoder} -/** Arguments for the `MakePageCountTask` that reads the number of - * pages for an attachment and stores it into the meta data of the - * attachment. +/** Arguments for the `MakePageCountTask` that reads the number of pages for an attachment + * and stores it into the meta data of the attachment. */ case class MakePageCountArgs( attachment: Ident diff --git a/modules/common/src/main/scala/docspell/common/MakePreviewArgs.scala b/modules/common/src/main/scala/docspell/common/MakePreviewArgs.scala index 646ff473..3c4b377c 100644 --- a/modules/common/src/main/scala/docspell/common/MakePreviewArgs.scala +++ b/modules/common/src/main/scala/docspell/common/MakePreviewArgs.scala @@ -9,11 +9,9 @@ package docspell.common import io.circe.generic.semiauto._ import io.circe.{Decoder, Encoder} -/** Arguments for the `MakePreviewTask` that generates a preview image - * for an attachment. +/** Arguments for the `MakePreviewTask` that generates a preview image for an attachment. * - * It can replace the current preview image or only generate one, if - * it is missing. + * It can replace the current preview image or only generate one, if it is missing. 
*/ case class MakePreviewArgs( attachment: Ident, diff --git a/modules/common/src/main/scala/docspell/common/MetaProposal.scala b/modules/common/src/main/scala/docspell/common/MetaProposal.scala index 34322690..6215c1f4 100644 --- a/modules/common/src/main/scala/docspell/common/MetaProposal.scala +++ b/modules/common/src/main/scala/docspell/common/MetaProposal.scala @@ -20,14 +20,12 @@ import io.circe.generic.semiauto._ /** A proposed meta data to an item. * - * There is only one value for each proposal type. The list of - * candidates is meant to be ordered from the best match to the - * lowest match. + * There is only one value for each proposal type. The list of candidates is meant to be + * ordered from the best match to the lowest match. * - * The candidate is already "resolved" against the database and - * contains a valid record (with its ID and a human readable name). - * Additionally it carries a set of "labels" (which may be empty) - * that are the source of this candidate. + * The candidate is already "resolved" against the database and contains a valid record + * (with its ID and a human readable name). Additionally it carries a set of "labels" + * (which may be empty) that are the source of this candidate. */ case class MetaProposal(proposalType: MetaProposalType, values: NonEmptyList[Candidate]) { @@ -96,8 +94,8 @@ object MetaProposal { } } - /** Merges candidates with same `IdRef` values and concatenates their - * respective labels. The candidate order is preserved. + /** Merges candidates with same `IdRef` values and concatenates their respective labels. + * The candidate order is preserved. 
*/ def flatten(s: NonEmptyList[Candidate]): NonEmptyList[Candidate] = { def mergeInto( diff --git a/modules/common/src/main/scala/docspell/common/MetaProposalList.scala b/modules/common/src/main/scala/docspell/common/MetaProposalList.scala index 58dd2649..c3066188 100644 --- a/modules/common/src/main/scala/docspell/common/MetaProposalList.scala +++ b/modules/common/src/main/scala/docspell/common/MetaProposalList.scala @@ -91,13 +91,12 @@ object MetaProposalList { .getOrElse(empty) def fromMap(m: Map[MetaProposalType, MetaProposal]): MetaProposalList = - new MetaProposalList(m.toList.map({ case (k, v) => v.copy(proposalType = k) })) + new MetaProposalList(m.toList.map { case (k, v) => v.copy(proposalType = k) }) - /** Flattens the given list of meta-proposals into a single list, - * where each meta-proposal type exists at most once. Candidates to - * equal proposal-types are merged together. The candidate's order - * is preserved and candidates of proposals are appended as given - * by the order of the given `seq'. + /** Flattens the given list of meta-proposals into a single list, where each + * meta-proposal type exists at most once. Candidates to equal proposal-types are + * merged together. The candidate's order is preserved and candidates of proposals are + * appended as given by the order of the given `seq'. */ def flatten(ml: Seq[MetaProposalList]): MetaProposalList = flatten0( diff --git a/modules/common/src/main/scala/docspell/common/NotifyDueItemsArgs.scala b/modules/common/src/main/scala/docspell/common/NotifyDueItemsArgs.scala index de37cc59..6e6a25c5 100644 --- a/modules/common/src/main/scala/docspell/common/NotifyDueItemsArgs.scala +++ b/modules/common/src/main/scala/docspell/common/NotifyDueItemsArgs.scala @@ -13,11 +13,10 @@ import io.circe.generic.semiauto._ /** Arguments to the notification task. * - * This tasks queries items with a due date and informs the user via - * mail. 
+ * This task queries items with a due date and informs the user via mail. * - * If the structure changes, there must be some database migration to - * update or remove the json data of the corresponding task. + * If the structure changes, there must be some database migration to update or remove + * the json data of the corresponding task. */ case class NotifyDueItemsArgs( account: AccountId, diff --git a/modules/common/src/main/scala/docspell/common/ProcessItemArgs.scala b/modules/common/src/main/scala/docspell/common/ProcessItemArgs.scala index e6a934d9..def1cfd6 100644 --- a/modules/common/src/main/scala/docspell/common/ProcessItemArgs.scala +++ b/modules/common/src/main/scala/docspell/common/ProcessItemArgs.scala @@ -14,11 +14,11 @@ import io.circe.generic.semiauto._ /** Arguments to the process-item task. * - * This task is run for each new file to create a new item from it or - * to add this file as an attachment to an existing item. + * This task is run for each new file to create a new item from it or to add this file as + * an attachment to an existing item. * - * If the `itemId' is set to some value, the item is tried to load to - * ammend with the given files. Otherwise a new item is created. + * If the `itemId' is set to some value, the item is tried to load to amend with the + * given files. Otherwise a new item is created. * * It is also re-used by the 'ReProcessItem' task. */ diff --git a/modules/common/src/main/scala/docspell/common/ReProcessItemArgs.scala b/modules/common/src/main/scala/docspell/common/ReProcessItemArgs.scala index db107737..3c304dff 100644 --- a/modules/common/src/main/scala/docspell/common/ReProcessItemArgs.scala +++ b/modules/common/src/main/scala/docspell/common/ReProcessItemArgs.scala @@ -11,10 +11,9 @@ import io.circe.{Decoder, Encoder} /** Arguments when re-processing an item. * - * The `itemId` must exist and point to some item. If the attachment - * list is non-empty, only those attachments are re-processed.
They - * must belong to the given item. If the list is empty, then all - * attachments are re-processed. + * The `itemId` must exist and point to some item. If the attachment list is non-empty, + * only those attachments are re-processed. They must belong to the given item. If the + * list is empty, then all attachments are re-processed. */ case class ReProcessItemArgs(itemId: Ident, attachments: List[Ident]) diff --git a/modules/common/src/main/scala/docspell/common/ScanMailboxArgs.scala b/modules/common/src/main/scala/docspell/common/ScanMailboxArgs.scala index 9da5d08c..4b00f74b 100644 --- a/modules/common/src/main/scala/docspell/common/ScanMailboxArgs.scala +++ b/modules/common/src/main/scala/docspell/common/ScanMailboxArgs.scala @@ -13,11 +13,10 @@ import io.circe.generic.semiauto._ /** Arguments to the poll-mailbox task. * - * This tasks queries user mailboxes and pushes found mails into - * docspell for processing. + * This task queries user mailboxes and pushes found mails into docspell for processing. * - * If the structure changes, there must be some database migration to - * update or remove the json data of the corresponding task. + * If the structure changes, there must be some database migration to update or remove + * the json data of the corresponding task. */ case class ScanMailboxArgs( // the docspell user account diff --git a/modules/convert/src/main/scala/docspell/convert/ConversionResult.scala b/modules/convert/src/main/scala/docspell/convert/ConversionResult.scala index a8d749aa..6d6cd55b 100644 --- a/modules/convert/src/main/scala/docspell/convert/ConversionResult.scala +++ b/modules/convert/src/main/scala/docspell/convert/ConversionResult.scala @@ -19,10 +19,9 @@ sealed trait ConversionResult[F[_]] { object ConversionResult { - /** The conversion is done by external tools that write files to the - * file system. These are temporary files and they will be deleted - * once the process finishes.
This handler is used to do something - * relevant with the resulting files. + /** The conversion is done by external tools that write files to the file system. These + * are temporary files and they will be deleted once the process finishes. This handler + * is used to do something relevant with the resulting files. */ type Handler[F[_], A] = Kleisli[F, ConversionResult[F], A] diff --git a/modules/convert/src/main/scala/docspell/convert/SanitizeHtml.scala b/modules/convert/src/main/scala/docspell/convert/SanitizeHtml.scala index b73b9bdd..42abf68f 100644 --- a/modules/convert/src/main/scala/docspell/convert/SanitizeHtml.scala +++ b/modules/convert/src/main/scala/docspell/convert/SanitizeHtml.scala @@ -12,11 +12,10 @@ import scodec.bits.ByteVector @FunctionalInterface trait SanitizeHtml { - /** The given `bytes' are html which can be modified to strip out - * unwanted content. + /** The given `bytes' are html which can be modified to strip out unwanted content. * - * The result should use the same character encoding as the given - * charset implies, or utf8 if not specified. + * The result should use the same character encoding as the given charset implies, or + * utf8 if not specified. 
*/ def apply(bytes: ByteVector, charset: Option[Charset]): ByteVector diff --git a/modules/convert/src/main/scala/docspell/convert/extern/ExternConv.scala b/modules/convert/src/main/scala/docspell/convert/extern/ExternConv.scala index 131ec564..98d73d6c 100644 --- a/modules/convert/src/main/scala/docspell/convert/extern/ExternConv.scala +++ b/modules/convert/src/main/scala/docspell/convert/extern/ExternConv.scala @@ -132,7 +132,7 @@ private[extern] object ExternConv { ): Pipe[F, Byte, Unit] = in => Stream - .eval(logger.debug(s"Storing input to file ${inFile} for running $name")) + .eval(logger.debug(s"Storing input to file $inFile for running $name")) .drain ++ Stream.eval(storeFile(in, inFile)) diff --git a/modules/convert/src/test/scala/docspell/convert/ConversionTest.scala b/modules/convert/src/test/scala/docspell/convert/ConversionTest.scala index ebe57b9c..a6fa98ca 100644 --- a/modules/convert/src/test/scala/docspell/convert/ConversionTest.scala +++ b/modules/convert/src/test/scala/docspell/convert/ConversionTest.scala @@ -150,12 +150,12 @@ class ConversionTest extends FunSuite with FileChecks { conversion .use { conv => def check: Handler[IO, Unit] = - Kleisli({ + Kleisli { case ConversionResult.InputMalformed(_, _) => ().pure[IO] case cr => IO.raiseError(new Exception(s"Unexpected result: $cr")) - }) + } runConversion(bombs, _ => check, conv).compile.drain } @@ -171,12 +171,12 @@ class ConversionTest extends FunSuite with FileChecks { .emits(uris) .covary[IO] .zipWithIndex - .evalMap({ case (uri, index) => + .evalMap { case (uri, index) => val load = uri.readURL[IO](8192) val dataType = DataType.filename(uri.path.segments.last) logger.info(s"Processing file ${uri.path.asString}") *> conv.toPDF(dataType, Language.German, handler(index))(load) - }) + } def commandsExist: Boolean = commandExists(convertConfig.unoconv.command.program) && diff --git a/modules/convert/src/test/scala/docspell/convert/FileChecks.scala 
b/modules/convert/src/test/scala/docspell/convert/FileChecks.scala index a8410160..12316c51 100644 --- a/modules/convert/src/test/scala/docspell/convert/FileChecks.scala +++ b/modules/convert/src/test/scala/docspell/convert/FileChecks.scala @@ -48,7 +48,7 @@ trait FileChecks { storePdfTxtHandler(file, file.resolveSibling("unexpected.txt")).map(_._1) def storePdfTxtHandler(filePdf: Path, fileTxt: Path): Handler[IO, (Path, Path)] = - Kleisli({ + Kleisli { case ConversionResult.SuccessPdfTxt(pdf, txt) => for { pout <- pdf.through(storeFile(filePdf)).compile.lastOrError @@ -64,7 +64,7 @@ trait FileChecks { case cr => throw new Exception(s"Unexpected result: $cr") - }) + } def commandExists(cmd: String): Boolean = Runtime.getRuntime.exec(Array("which", cmd)).waitFor() == 0 diff --git a/modules/extract/src/main/scala/docspell/extract/ocr/Ocr.scala b/modules/extract/src/main/scala/docspell/extract/ocr/Ocr.scala index fcf5836d..982041bc 100644 --- a/modules/extract/src/main/scala/docspell/extract/ocr/Ocr.scala +++ b/modules/extract/src/main/scala/docspell/extract/ocr/Ocr.scala @@ -62,8 +62,8 @@ object Ocr { ): Stream[F, String] = runTesseractFile(img, logger, lang, config) - /** Run ghostscript to extract all pdf pages into tiff files. The - * files are stored to a temporary location on disk and returned. + /** Run ghostscript to extract all pdf pages into tiff files. The files are stored to a + * temporary location on disk and returned. */ private[extract] def runGhostscript[F[_]: Async]( pdf: Stream[F, Byte], @@ -88,8 +88,8 @@ object Ocr { .flatMap(_ => File.listFiles(pathEndsWith(".tif"), wd)) } - /** Run ghostscript to extract all pdf pages into tiff files. The - * files are stored to a temporary location on disk and returned. + /** Run ghostscript to extract all pdf pages into tiff files. The files are stored to a + * temporary location on disk and returned. 
*/ private[extract] def runGhostscriptFile[F[_]: Async]( pdf: Path, @@ -111,8 +111,8 @@ object Ocr { private def pathEndsWith(ext: String): Path => Boolean = p => p.fileName.toString.endsWith(ext) - /** Run unpaper to optimize the image for ocr. The - * files are stored to a temporary location on disk and returned. + /** Run unpaper to optimize the image for ocr. The files are stored to a temporary + * location on disk and returned. */ private[extract] def runUnpaperFile[F[_]: Async]( img: Path, @@ -139,8 +139,7 @@ object Ocr { } } - /** Run tesseract on the given image file and return the extracted - * text. + /** Run tesseract on the given image file and return the extracted text. */ private[extract] def runTesseractFile[F[_]: Async]( img: Path, @@ -160,8 +159,7 @@ object Ocr { .map(_.stdout) } - /** Run tesseract on the given image file and return the extracted - * text. + /** Run tesseract on the given image file and return the extracted text. */ private[extract] def runTesseractStdin[F[_]: Async]( img: Stream[F, Byte], diff --git a/modules/extract/src/main/scala/docspell/extract/ocr/TextExtract.scala b/modules/extract/src/main/scala/docspell/extract/ocr/TextExtract.scala index 30e754b6..e60b9daf 100644 --- a/modules/extract/src/main/scala/docspell/extract/ocr/TextExtract.scala +++ b/modules/extract/src/main/scala/docspell/extract/ocr/TextExtract.scala @@ -31,7 +31,7 @@ object TextExtract { ): Stream[F, Text] = Stream .eval(TikaMimetype.detect(in, MimeTypeHint.none)) - .flatMap({ + .flatMap { case MimeType.pdf => Stream.eval(Ocr.extractPdf(in, logger, lang, config)).unNoneTerminate @@ -40,7 +40,7 @@ object TextExtract { case mt => raiseError(s"File `$mt` not supported") - }) + } .map(Text.apply) private def raiseError[F[_]: Sync](msg: String): Stream[F, Nothing] = diff --git a/modules/extract/src/main/scala/docspell/extract/poi/PoiExtract.scala b/modules/extract/src/main/scala/docspell/extract/poi/PoiExtract.scala index fc7e57ba..73c38c35 100644 --- 
a/modules/extract/src/main/scala/docspell/extract/poi/PoiExtract.scala +++ b/modules/extract/src/main/scala/docspell/extract/poi/PoiExtract.scala @@ -49,17 +49,13 @@ object PoiExtract { case PoiType.docx => getDocx(data) case PoiType.msoffice => - EitherT(getDoc[F](data)) - .recoverWith({ case _ => - EitherT(getXls[F](data)) - }) - .value + EitherT(getDoc[F](data)).recoverWith { case _ => + EitherT(getXls[F](data)) + }.value case PoiType.ooxml => - EitherT(getDocx[F](data)) - .recoverWith({ case _ => - EitherT(getXlsx[F](data)) - }) - .value + EitherT(getDocx[F](data)).recoverWith { case _ => + EitherT(getXlsx[F](data)) + }.value case mt => Sync[F].pure(Left(new Exception(s"Unsupported content: ${mt.asString}"))) } diff --git a/modules/extract/src/test/scala/docspell/extract/pdfbox/PdfboxPreviewTest.scala b/modules/extract/src/test/scala/docspell/extract/pdfbox/PdfboxPreviewTest.scala index ddafa271..20c0518a 100644 --- a/modules/extract/src/test/scala/docspell/extract/pdfbox/PdfboxPreviewTest.scala +++ b/modules/extract/src/test/scala/docspell/extract/pdfbox/PdfboxPreviewTest.scala @@ -19,8 +19,8 @@ import munit._ class PdfboxPreviewTest extends FunSuite { val testPDFs = List( - ExampleFiles.letter_de_pdf -> "7d98be75b239816d6c751b3f3c56118ebf1a4632c43baf35a68a662f9d595ab8", - ExampleFiles.letter_en_pdf -> "2bffbd01634525c6ce1fe477de23464e038055c4917afa41dd6186fe03a49f5b", + ExampleFiles.letter_de_pdf -> "7d98be75b239816d6c751b3f3c56118ebf1a4632c43baf35a68a662f9d595ab8", + ExampleFiles.letter_en_pdf -> "2bffbd01634525c6ce1fe477de23464e038055c4917afa41dd6186fe03a49f5b", ExampleFiles.scanner_pdf13_pdf -> "05ce4fd686b3d24b0e2d60df0c6d79b1df2338fcf7a6957e34cb4d11c65682b4" ) diff --git a/modules/files/src/main/scala/docspell/files/ImageSize.scala b/modules/files/src/main/scala/docspell/files/ImageSize.scala index 667fc703..d40fd706 100644 --- a/modules/files/src/main/scala/docspell/files/ImageSize.scala +++ b/modules/files/src/main/scala/docspell/files/ImageSize.scala 
@@ -20,20 +20,17 @@ import fs2.io.file.Path object ImageSize { - /** Return the image size from its header without reading - * the whole image into memory. + /** Return the image size from its header without reading the whole image into memory. */ def get(file: Path): Option[Dimension] = Using(new FileImageInputStream(file.toNioPath.toFile))(getDimension).toOption.flatten - /** Return the image size from its header without reading - * the whole image into memory. + /** Return the image size from its header without reading the whole image into memory. */ def get(in: InputStream): Option[Dimension] = Option(ImageIO.createImageInputStream(in)).flatMap(getDimension) - /** Return the image size from its header without reading - * the whole image into memory. + /** Return the image size from its header without reading the whole image into memory. */ def get[F[_]: Sync](data: Stream[F, Byte]): F[Option[Dimension]] = data.take(768).compile.to(Array).map { ar => diff --git a/modules/fts-client/src/main/scala/docspell/ftsclient/FtsClient.scala b/modules/fts-client/src/main/scala/docspell/ftsclient/FtsClient.scala index 87101c60..c6f961f0 100644 --- a/modules/fts-client/src/main/scala/docspell/ftsclient/FtsClient.scala +++ b/modules/fts-client/src/main/scala/docspell/ftsclient/FtsClient.scala @@ -14,32 +14,28 @@ import docspell.common._ import org.log4s.getLogger -/** The fts client is the interface for docspell to a fulltext search - * engine. +/** The fts client is the interface for docspell to a fulltext search engine. * - * It defines all operations required for integration into docspell. - * It uses data structures from docspell. Implementation modules need - * to translate it to the engine that provides the features. + * It defines all operations required for integration into docspell. It uses data + * structures from docspell. Implementation modules need to translate it to the engine + * that provides the features. */ trait FtsClient[F[_]] { - /** Initialization tasks. 
This can be used to setup the fulltext - * search engine. The implementation is expected to keep track of - * run migrations, so that running these is idempotent. For - * example, it may be run on each application start. + /** Initialization tasks. This can be used to setup the fulltext search engine. The + * implementation is expected to keep track of run migrations, so that running these is + * idempotent. For example, it may be run on each application start. * - * Initialization may involve re-indexing all data, therefore it - * must run outside the scope of this client. The migration may - * include a task that applies any work and/or it can return a - * result indicating that after this task a re-index is necessary. + * Initialization may involve re-indexing all data, therefore it must run outside the + * scope of this client. The migration may include a task that applies any work and/or + * it can return a result indicating that after this task a re-index is necessary. */ def initialize: F[List[FtsMigration[F]]] - /** A list of initialization tasks that can be run when re-creating - * the index. + /** A list of initialization tasks that can be run when re-creating the index. * - * This is not run on startup, but only when required, for example - * when re-creating the entire index. + * This is not run on startup, but only when required, for example when re-creating the + * entire index. */ def initializeNew: List[FtsMigration[F]] @@ -53,18 +49,16 @@ trait FtsClient[F[_]] { else Stream.emit(result) ++ searchAll(q.nextPage) } - /** Push all data to the index. Data with same `id' is replaced. - * Values that are `None' are removed from the index (or set to an - * empty string). + /** Push all data to the index. Data with same `id' is replaced. Values that are `None' + * are removed from the index (or set to an empty string). 
*/ def indexData(logger: Logger[F], data: Stream[F, TextData]): F[Unit] def indexData(logger: Logger[F], data: TextData*): F[Unit] = indexData(logger, Stream.emits(data)) - /** Push all data to the index, but only update existing entries. No - * new entries are created and values that are given as `None' are - * skipped. + /** Push all data to the index, but only update existing entries. No new entries are + * created and values that are given as `None' are skipped. */ def updateIndex(logger: Logger[F], data: Stream[F, TextData]): F[Unit] diff --git a/modules/fts-client/src/main/scala/docspell/ftsclient/FtsQuery.scala b/modules/fts-client/src/main/scala/docspell/ftsclient/FtsQuery.scala index 1f8c19b1..f64d6d1c 100644 --- a/modules/fts-client/src/main/scala/docspell/ftsclient/FtsQuery.scala +++ b/modules/fts-client/src/main/scala/docspell/ftsclient/FtsQuery.scala @@ -10,16 +10,14 @@ import docspell.common._ /** A fulltext query. * - * The query itself is a raw string. Each implementation may - * interpret it according to the system in use. + * The query itself is a raw string. Each implementation may interpret it according to + * the system in use. * - * Searches must only look for given collective and in the given list - * of item ids, if it is non-empty. If the item set is empty, then - * don't restrict the result in this way. + * Searches must only look for given collective and in the given list of item ids, if it + * is non-empty. If the item set is empty, then don't restrict the result in this way. * - * The set of folders must be used to restrict the results only to - * items that have one of the folders set or no folder set. If the - * set is empty, the restriction does not apply. + * The set of folders must be used to restrict the results only to items that have one of + * the folders set or no folder set. If the set is empty, the restriction does not apply. 
*/ final case class FtsQuery( q: String, diff --git a/modules/fts-solr/src/main/scala/docspell/ftssolr/SolrSetup.scala b/modules/fts-solr/src/main/scala/docspell/ftssolr/SolrSetup.scala index 8349e443..6aa7dd36 100644 --- a/modules/fts-solr/src/main/scala/docspell/ftssolr/SolrSetup.scala +++ b/modules/fts-solr/src/main/scala/docspell/ftssolr/SolrSetup.scala @@ -55,8 +55,8 @@ object SolrSetup { } yield migs def setupSchema: List[SolrMigration[F]] = { - val verDoc = VersionDoc(versionDocId, allMigrations.map(_.value.version).max) - val solrUp = SolrUpdate(cfg, client) + val verDoc = VersionDoc(versionDocId, allMigrations.map(_.value.version).max) + val solrUp = SolrUpdate(cfg, client) val writeVersion = SolrMigration.writeVersion(solrUp, verDoc) val deleteAll = SolrMigration.deleteData(0, solrUp) val indexAll = SolrMigration.indexAll[F](Int.MaxValue, "Index all data") diff --git a/modules/joex/src/main/scala/docspell/joex/JoexApp.scala b/modules/joex/src/main/scala/docspell/joex/JoexApp.scala index bafb2e7a..537e746b 100644 --- a/modules/joex/src/main/scala/docspell/joex/JoexApp.scala +++ b/modules/joex/src/main/scala/docspell/joex/JoexApp.scala @@ -22,10 +22,9 @@ trait JoexApp[F[_]] { /** Shuts down the job executor. * - * It will immediately stop taking new jobs, waiting for currently - * running jobs to complete normally (i.e. running jobs are not - * canceled). After this completed, the webserver stops and the - * main loop will exit. + * It will immediately stop taking new jobs, waiting for currently running jobs to + * complete normally (i.e. running jobs are not canceled). After this completed, the + * webserver stops and the main loop will exit. 
*/ def initShutdown: F[Unit] } diff --git a/modules/joex/src/main/scala/docspell/joex/analysis/NerFile.scala b/modules/joex/src/main/scala/docspell/joex/analysis/NerFile.scala index 2c57447f..c7d73fd1 100644 --- a/modules/joex/src/main/scala/docspell/joex/analysis/NerFile.scala +++ b/modules/joex/src/main/scala/docspell/joex/analysis/NerFile.scala @@ -97,7 +97,7 @@ object NerFile { private def sanitizeRegex(str: String): String = str.trim.toLowerCase.foldLeft("") { (res, ch) => - if (invalidChars.contains(ch)) s"${res}\\$ch" + if (invalidChars.contains(ch)) s"$res\\$ch" else s"$res$ch" } } diff --git a/modules/joex/src/main/scala/docspell/joex/analysis/RegexNerFile.scala b/modules/joex/src/main/scala/docspell/joex/analysis/RegexNerFile.scala index e2a8d78f..a8c05613 100644 --- a/modules/joex/src/main/scala/docspell/joex/analysis/RegexNerFile.scala +++ b/modules/joex/src/main/scala/docspell/joex/analysis/RegexNerFile.scala @@ -22,8 +22,7 @@ import docspell.store.records.RPerson import io.circe.syntax._ import org.log4s.getLogger -/** Maintains a custom regex-ner file per collective for stanford's - * regexner annotator. +/** Maintains a custom regex-ner file per collective for stanford's regexner annotator. */ trait RegexNerFile[F[_]] { @@ -64,7 +63,7 @@ object RegexNerFile { val dur = Duration.between(nf.creation, now) if (dur > cfg.minTime) logger.fdebug( - s"Cache time elapsed (${dur} > ${cfg.minTime}). Check for new state." + s"Cache time elapsed ($dur > ${cfg.minTime}). Check for new state." 
) *> updateFile( collective, now, @@ -141,7 +140,7 @@ object RegexNerFile { ) for { - _ <- logger.finfo(s"Generating custom NER file for collective '${collective.id}'") + _ <- logger.finfo(s"Generating custom NER file for collective '${collective.id}'") names <- store.transact(QCollective.allNames(collective, cfg.maxEntries)) nerFile = NerFile(collective, lastUpdate, now) _ <- update(nerFile, NerFile.mkNerConfig(names)) diff --git a/modules/joex/src/main/scala/docspell/joex/emptytrash/EmptyTrashTask.scala b/modules/joex/src/main/scala/docspell/joex/emptytrash/EmptyTrashTask.scala index b7bf76b7..8cd4d8a1 100644 --- a/modules/joex/src/main/scala/docspell/joex/emptytrash/EmptyTrashTask.scala +++ b/modules/joex/src/main/scala/docspell/joex/emptytrash/EmptyTrashTask.scala @@ -52,7 +52,7 @@ object EmptyTrashTask { s"Starting removing all soft-deleted items older than ${maxDate.asString}" ) nDeleted <- deleteAll(ctx.args, maxDate, itemOps, itemSearchOps, ctx) - _ <- ctx.logger.info(s"Finished deleting ${nDeleted} items") + _ <- ctx.logger.info(s"Finished deleting $nDeleted items") } yield () } diff --git a/modules/joex/src/main/scala/docspell/joex/fts/FtsWork.scala b/modules/joex/src/main/scala/docspell/joex/fts/FtsWork.scala index 9ce88c28..861360fe 100644 --- a/modules/joex/src/main/scala/docspell/joex/fts/FtsWork.scala +++ b/modules/joex/src/main/scala/docspell/joex/fts/FtsWork.scala @@ -101,7 +101,7 @@ object FtsWork { def recoverWith( other: FtsWork[F] )(implicit ev: ApplicativeError[F, Throwable]): FtsWork[F] = - Kleisli(ctx => mt.run(ctx).onError({ case _ => other.run(ctx) })) + Kleisli(ctx => mt.run(ctx).onError { case _ => other.run(ctx) }) def forContext( cfg: Config.FullTextSearch, diff --git a/modules/joex/src/main/scala/docspell/joex/fts/Migration.scala b/modules/joex/src/main/scala/docspell/joex/fts/Migration.scala index d23ab072..0471abc0 100644 --- a/modules/joex/src/main/scala/docspell/joex/fts/Migration.scala +++ 
b/modules/joex/src/main/scala/docspell/joex/fts/Migration.scala @@ -19,9 +19,8 @@ import docspell.store.Store /** Migrating the index from the previous version to this version. * - * The migration asks the fulltext search client for a list of - * migration tasks to run. It may be empty when there is no migration - * required. + * The migration asks the fulltext search client for a list of migration tasks to run. It + * may be empty when there is no migration required. */ case class Migration[F[_]]( version: Int, diff --git a/modules/joex/src/main/scala/docspell/joex/fts/package.scala b/modules/joex/src/main/scala/docspell/joex/fts/package.scala index 0e1d00ef..96c05b18 100644 --- a/modules/joex/src/main/scala/docspell/joex/fts/package.scala +++ b/modules/joex/src/main/scala/docspell/joex/fts/package.scala @@ -10,8 +10,7 @@ import cats.data.Kleisli package object fts { - /** Some work that must be done to advance the schema of the fulltext - * index. + /** Some work that must be done to advance the schema of the fulltext index. 
*/ type FtsWork[F[_]] = Kleisli[F, FtsContext[F], Unit] diff --git a/modules/joex/src/main/scala/docspell/joex/learn/ClassifierName.scala b/modules/joex/src/main/scala/docspell/joex/learn/ClassifierName.scala index fb34bc23..078f6ff5 100644 --- a/modules/joex/src/main/scala/docspell/joex/learn/ClassifierName.scala +++ b/modules/joex/src/main/scala/docspell/joex/learn/ClassifierName.scala @@ -23,7 +23,7 @@ object ClassifierName { private val categoryPrefix = "tagcategory-" def tagCategory(cat: String): ClassifierName = - apply(s"${categoryPrefix}${cat}") + apply(s"$categoryPrefix$cat") val concernedPerson: ClassifierName = apply("concernedperson") @@ -56,7 +56,7 @@ object ClassifierName { def findOrphanTagModels[F[_]](coll: Ident): ConnectionIO[List[RClassifierModel]] = for { cats <- RClassifierSetting.getActiveCategories(coll) - allModels = RClassifierModel.findAllByQuery(coll, s"${categoryPrefix}%") + allModels = RClassifierModel.findAllByQuery(coll, s"$categoryPrefix%") result <- NonEmptyList.fromList(cats) match { case Some(nel) => allModels.flatMap(all => diff --git a/modules/joex/src/main/scala/docspell/joex/learn/Classify.scala b/modules/joex/src/main/scala/docspell/joex/learn/Classify.scala index 2e07dd9f..11d97aa4 100644 --- a/modules/joex/src/main/scala/docspell/joex/learn/Classify.scala +++ b/modules/joex/src/main/scala/docspell/joex/learn/Classify.scala @@ -47,7 +47,7 @@ object Classify { .flatMap(_ => classifier.classify(logger, ClassifierModel(modelFile), text)) }).filter(_ != LearnClassifierTask.noClass) .flatTapNone(logger.debug("Guessed: ")) - _ <- OptionT.liftF(logger.debug(s"Guessed: ${cls}")) + _ <- OptionT.liftF(logger.debug(s"Guessed: $cls")) } yield cls).value } diff --git a/modules/joex/src/main/scala/docspell/joex/notify/NotifyDueItemsTask.scala b/modules/joex/src/main/scala/docspell/joex/notify/NotifyDueItemsTask.scala index d196f390..b29ea792 100644 --- a/modules/joex/src/main/scala/docspell/joex/notify/NotifyDueItemsTask.scala +++ 
b/modules/joex/src/main/scala/docspell/joex/notify/NotifyDueItemsTask.scala @@ -40,7 +40,7 @@ object NotifyDueItemsTask { _ <- createMail(cfg, mailCfg, ctx) .semiflatMap { mail => for { - _ <- ctx.logger.info(s"Sending notification mail to ${ctx.args.recipients}") + _ <- ctx.logger.info(s"Sending notification mail to ${ctx.args.recipients}") res <- emil(mailCfg.toMailConfig).send(mail).map(_.head) _ <- ctx.logger.info(s"Sent mail with id: $res") } yield () diff --git a/modules/joex/src/main/scala/docspell/joex/pdfconv/PdfConvTask.scala b/modules/joex/src/main/scala/docspell/joex/pdfconv/PdfConvTask.scala index 4aa23327..9d74e038 100644 --- a/modules/joex/src/main/scala/docspell/joex/pdfconv/PdfConvTask.scala +++ b/modules/joex/src/main/scala/docspell/joex/pdfconv/PdfConvTask.scala @@ -26,9 +26,8 @@ import bitpeace.RangeDef import io.circe.generic.semiauto._ import io.circe.{Decoder, Encoder} -/** Converts the given attachment file using ocrmypdf if it is a pdf - * and has not already been converted (the source file is the same as - * in the attachment). +/** Converts the given attachment file using ocrmypdf if it is a pdf and has not already + * been converted (the source file is the same as in the attachment). */ object PdfConvTask { case class Args(attachId: Ident) @@ -100,7 +99,7 @@ object PdfConvTask { .through(bp.fetchData2(RangeDef.all)) val storeResult: ConversionResult.Handler[F, Unit] = - Kleisli({ + Kleisli { case ConversionResult.SuccessPdf(file) => storeToAttachment(ctx, in, file) @@ -109,15 +108,15 @@ object PdfConvTask { case ConversionResult.UnsupportedFormat(mime) => ctx.logger.warn( - s"Unable to convert '${mime}' file ${ctx.args}: unsupported format." + s"Unable to convert '$mime' file ${ctx.args}: unsupported format." 
) case ConversionResult.InputMalformed(mime, reason) => - ctx.logger.warn(s"Unable to convert '${mime}' file ${ctx.args}: $reason") + ctx.logger.warn(s"Unable to convert '$mime' file ${ctx.args}: $reason") case ConversionResult.Failure(ex) => Sync[F].raiseError(ex) - }) + } def ocrMyPdf(lang: Language): F[Unit] = OcrMyPdf.toPDF[F, Unit]( diff --git a/modules/joex/src/main/scala/docspell/joex/process/AttachmentPageCount.scala b/modules/joex/src/main/scala/docspell/joex/process/AttachmentPageCount.scala index a8527f38..162e804a 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/AttachmentPageCount.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/AttachmentPageCount.scala @@ -22,9 +22,8 @@ import docspell.store.syntax.MimeTypes._ import bitpeace.{Mimetype, RangeDef} -/** Goes through all attachments that must be already converted into a - * pdf. If it is a pdf, the number of pages are retrieved and stored - * in the attachment metadata. +/** Goes through all attachments that must be already converted into a pdf. If it is a + * pdf, the number of pages are retrieved and stored in the attachment metadata. */ object AttachmentPageCount { diff --git a/modules/joex/src/main/scala/docspell/joex/process/AttachmentPreview.scala b/modules/joex/src/main/scala/docspell/joex/process/AttachmentPreview.scala index 3735ef20..d5f66e81 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/AttachmentPreview.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/AttachmentPreview.scala @@ -24,9 +24,9 @@ import docspell.store.syntax.MimeTypes._ import bitpeace.{Mimetype, MimetypeHint, RangeDef} -/** Goes through all attachments that must be already converted into a - * pdf. If it is a pdf, the first page is converted into a small - * preview png image and linked to the attachment. +/** Goes through all attachments that must be already converted into a pdf. 
If it is a + * pdf, the first page is converted into a small preview png image and linked to the + * attachment. */ object AttachmentPreview { diff --git a/modules/joex/src/main/scala/docspell/joex/process/ConvertPdf.scala b/modules/joex/src/main/scala/docspell/joex/process/ConvertPdf.scala index d6a2dbbb..102b4222 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/ConvertPdf.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/ConvertPdf.scala @@ -23,19 +23,16 @@ import docspell.store.syntax.MimeTypes._ import bitpeace.{Mimetype, MimetypeHint, RangeDef} -/** Goes through all attachments and creates a PDF version of it where - * supported. +/** Goes through all attachments and creates a PDF version of it where supported. * - * The `attachment` record is updated with the PDF version while the - * original file has been stored in the `attachment_source` record. + * The `attachment` record is updated with the PDF version while the original file has + * been stored in the `attachment_source` record. * - * If pdf conversion is not possible or if the input is already a - * pdf, both files are identical. That is, the `file_id`s point to - * the same file. Since the name of an attachment may be changed by - * the user, the `attachment_origin` record keeps that, too. + * If pdf conversion is not possible or if the input is already a pdf, both files are + * identical. That is, the `file_id`s point to the same file. Since the name of an + * attachment may be changed by the user, the `attachment_origin` record keeps that, too. * - * This step assumes an existing premature item, it traverses its - * attachments. + * This step assumes an existing premature item, it traverses its attachments. */ object ConvertPdf { @@ -104,7 +101,7 @@ object ConvertPdf { ra: RAttachment, item: ItemData ): Handler[F, (RAttachment, Option[RAttachmentMeta])] = - Kleisli({ + Kleisli { case ConversionResult.SuccessPdf(pdf) => ctx.logger.info(s"Conversion to pdf successful. 
Saving file.") *> storePDF(ctx, cfg, ra, pdf) @@ -142,7 +139,7 @@ object ConvertPdf { ctx.logger .error(s"PDF conversion failed: ${ex.getMessage}. Go without PDF file") *> (ra, None: Option[RAttachmentMeta]).pure[F] - }) + } private def storePDF[F[_]: Sync]( ctx: Context[F, ProcessItemArgs], @@ -196,7 +193,7 @@ object ConvertPdf { case Right(_) => ().pure[F] case Left(ex) => ctx.logger - .error(ex)(s"Cannot delete previous attachment file: ${raPrev}") + .error(ex)(s"Cannot delete previous attachment file: $raPrev") } } yield () diff --git a/modules/joex/src/main/scala/docspell/joex/process/CreateItem.scala b/modules/joex/src/main/scala/docspell/joex/process/CreateItem.scala index 8be2ef08..3c10db10 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/CreateItem.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/CreateItem.scala @@ -45,9 +45,9 @@ object CreateItem { Stream .emits(ctx.args.files) .flatMap(f => ctx.store.bitpeace.get(f.fileMetaId.id).map(fm => (f, fm))) - .collect({ case (f, Some(fm)) if isValidFile(fm) => f }) + .collect { case (f, Some(fm)) if isValidFile(fm) => f } .zipWithIndex - .evalMap({ case (f, index) => + .evalMap { case (f, index) => Ident .randomId[F] .map(id => @@ -60,7 +60,7 @@ object CreateItem { f.name ) ) - }) + } } .compile .toVector @@ -152,7 +152,7 @@ object CreateItem { .transact(RAttachment.findByItemCollectiveSource(ri.id, ri.cid, fids)) .flatTap(ats => ctx.logger.debug( - s"Found ${ats.size} attachments. Use only those from task args: ${fileMetaIds}" + s"Found ${ats.size} attachments. 
Use only those from task args: $fileMetaIds" ) ) ) diff --git a/modules/joex/src/main/scala/docspell/joex/process/CrossCheckProposals.scala b/modules/joex/src/main/scala/docspell/joex/process/CrossCheckProposals.scala index aa50ef20..fd1bf993 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/CrossCheckProposals.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/CrossCheckProposals.scala @@ -14,12 +14,10 @@ import cats.implicits._ import docspell.common._ import docspell.joex.scheduler.Task -/** After candidates have been determined, the set is reduced by doing - * some cross checks. For example: if a organization is suggested as - * correspondent, the correspondent person must be linked to that - * organization. So this *removes all* person candidates that are not - * linked to the first organization candidate (which will be linked - * to the item). +/** After candidates have been determined, the set is reduced by doing some cross checks. + * For example: if a organization is suggested as correspondent, the correspondent person + * must be linked to that organization. So this *removes all* person candidates that are + * not linked to the first organization candidate (which will be linked to the item). 
*/ object CrossCheckProposals { diff --git a/modules/joex/src/main/scala/docspell/joex/process/DuplicateCheck.scala b/modules/joex/src/main/scala/docspell/joex/process/DuplicateCheck.scala index fea8d544..3dd1e5dd 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/DuplicateCheck.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/DuplicateCheck.scala @@ -52,7 +52,7 @@ object DuplicateCheck { val fname = ctx.args.files.find(_.fileMetaId.id == fd.fm.id).flatMap(_.name) if (fd.exists) ctx.logger - .info(s"Deleting duplicate file ${fname}!") *> ctx.store.bitpeace + .info(s"Deleting duplicate file $fname!") *> ctx.store.bitpeace .delete(fd.fm.id) .compile .drain diff --git a/modules/joex/src/main/scala/docspell/joex/process/EvalProposals.scala b/modules/joex/src/main/scala/docspell/joex/process/EvalProposals.scala index 98fc0ce7..6062a7a1 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/EvalProposals.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/EvalProposals.scala @@ -15,8 +15,7 @@ import docspell.common._ import docspell.joex.scheduler.{Context, Task} import docspell.store.records.{RAttachmentMeta, RPerson} -/** Calculate weights for candidates that adds the most likely - * candidate a lower number. +/** Calculate weights for candidates that adds the most likely candidate a lower number. */ object EvalProposals { diff --git a/modules/joex/src/main/scala/docspell/joex/process/ExtractArchive.scala b/modules/joex/src/main/scala/docspell/joex/process/ExtractArchive.scala index 8c5afb65..931140c9 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/ExtractArchive.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/ExtractArchive.scala @@ -25,16 +25,13 @@ import docspell.store.syntax.MimeTypes._ import bitpeace.{Mimetype, MimetypeHint, RangeDef} import emil.Mail -/** Goes through all attachments and extracts archive files, like zip - * files. 
The process is recursive, until all archives have been - * extracted. +/** Goes through all attachments and extracts archive files, like zip files. The process + * is recursive, until all archives have been extracted. * - * The archive file is stored as a `attachment_archive` record that - * references all its elements. If there are inner archive, only the - * outer archive file is preserved. + * The archive file is stored as a `attachment_archive` record that references all its + * elements. If there are inner archive, only the outer archive file is preserved. * - * This step assumes an existing premature item, it traverses its - * attachments. + * This step assumes an existing premature item, it traverses its attachments. */ object ExtractArchive { @@ -78,11 +75,10 @@ object ExtractArchive { ) } - /** After all files have been extracted, the `extract' contains the - * whole (combined) result. This fixes positions of the attachments - * such that the elements of an archive are "spliced" into the - * attachment list at the position of the archive. If there is no - * archive, positions don't need to be fixed. + /** After all files have been extracted, the `extract' contains the whole (combined) + * result. This fixes positions of the attachments such that the elements of an archive + * are "spliced" into the attachment list at the position of the archive. If there is + * no archive, positions don't need to be fixed. 
*/ private def fixPositions(extract: Extracted): Extracted = if (extract.archives.isEmpty) extract @@ -203,8 +199,8 @@ object ExtractArchive { tentry: (Binary[F], Long) ): Stream[F, Extracted] = { val (entry, subPos) = tentry - val mimeHint = MimetypeHint.filename(entry.name).withAdvertised(entry.mime.asString) - val fileMeta = ctx.store.bitpeace.saveNew(entry.data, 8192, mimeHint) + val mimeHint = MimetypeHint.filename(entry.name).withAdvertised(entry.mime.asString) + val fileMeta = ctx.store.bitpeace.saveNew(entry.data, 8192, mimeHint) Stream.eval(ctx.logger.debug(s"Extracted ${entry.name}. Storing as attachment.")) >> fileMeta.evalMap { fm => Ident.randomId.map { id => @@ -267,7 +263,7 @@ object ExtractArchive { val sorted = nel.sorted val offset = sorted.head.first val pos = - sorted.zipWithIndex.map({ case (p, i) => p.id -> (i + offset) }).toList.toMap + sorted.zipWithIndex.map { case (p, i) => p.id -> (i + offset) }.toList.toMap val nf = files.map(f => pos.get(f.id).map(n => f.copy(position = n)).getOrElse(f)) copy(files = nf) diff --git a/modules/joex/src/main/scala/docspell/joex/process/FindProposal.scala b/modules/joex/src/main/scala/docspell/joex/process/FindProposal.scala index c936d258..e132f624 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/FindProposal.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/FindProposal.scala @@ -19,8 +19,8 @@ import docspell.joex.Config import docspell.joex.scheduler.{Context, Task} import docspell.store.records._ -/** Super simple approach to find corresponding meta data to an item - * by looking up values from NER in the users address book. +/** Super simple approach to find corresponding meta data to an item by looking up values + * from NER in the users address book. 
*/ object FindProposal { type Args = ProcessItemArgs diff --git a/modules/joex/src/main/scala/docspell/joex/process/ItemData.scala b/modules/joex/src/main/scala/docspell/joex/process/ItemData.scala index 3376a3e0..a3796a31 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/ItemData.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/ItemData.scala @@ -12,18 +12,23 @@ import docspell.store.records.{RAttachment, RAttachmentMeta, RItem} /** Data that is carried across all processing tasks. * - * @param item the stored item record - * @param attachments the attachments belonging to the item - * @param metas the meta data to each attachment; depending on the - * state of processing, this may be empty - * @param dateLabels a separate list of found dates - * @param originFile a mapping from an attachment id to a filemeta-id - * containng the source or origin file - * @param givenMeta meta data to this item that was not "guessed" - * from an attachment but given and thus is always correct - * @param classifyProposals these are proposals that were obtained by - * a trained classifier. There are no ner-tags, it will only provide a - * single label + * @param item + * the stored item record + * @param attachments + * the attachments belonging to the item + * @param metas + * the meta data to each attachment; depending on the state of processing, this may be + * empty + * @param dateLabels + * a separate list of found dates + * @param originFile + * a mapping from an attachment id to a filemeta-id containng the source or origin file + * @param givenMeta + * meta data to this item that was not "guessed" from an attachment but given and thus + * is always correct + * @param classifyProposals + * these are proposals that were obtained by a trained classifier. 
There are no + * ner-tags, it will only provide a single label */ case class ItemData( item: RItem, @@ -31,7 +36,7 @@ case class ItemData( metas: Vector[RAttachmentMeta], dateLabels: Vector[AttachmentDates], originFile: Map[Ident, Ident], // maps RAttachment.id -> FileMeta.id - givenMeta: MetaProposalList, // given meta data not associated to a specific attachment + givenMeta: MetaProposalList, // given meta data not associated to a specific attachment // a list of tags (names or ids) attached to the item if they exist tags: List[String], // proposals obtained from the classifier @@ -39,9 +44,8 @@ case class ItemData( classifyTags: List[String] ) { - /** sort by weight; order of equal weights is not important, just - * choose one others are then suggestions - * doc-date is only set when given explicitely, not from "guessing" + /** sort by weight; order of equal weights is not important, just choose one others are + * then suggestions doc-date is only set when given explicitely, not from "guessing" */ def finalProposals: MetaProposalList = MetaProposalList diff --git a/modules/joex/src/main/scala/docspell/joex/process/ItemHandler.scala b/modules/joex/src/main/scala/docspell/joex/process/ItemHandler.scala index 50c10234..98c35734 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/ItemHandler.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/ItemHandler.scala @@ -77,7 +77,7 @@ object ItemHandler { )(data: ItemData): Task[F, Args, ItemData] = isLastRetry[F].flatMap { case true => - ProcessItem[F](cfg, itemOps, fts, analyser, regexNer)(data).attempt.flatMap({ + ProcessItem[F](cfg, itemOps, fts, analyser, regexNer)(data).attempt.flatMap { case Right(d) => Task.pure(d) case Left(ex) => @@ -85,7 +85,7 @@ object ItemHandler { "Processing failed on last retry. Creating item but without proposals." 
).flatMap(_ => itemStateTask(ItemState.Created)(data)) .andThen(_ => Sync[F].raiseError(ex)) - }) + } case false => ProcessItem[F](cfg, itemOps, fts, analyser, regexNer)(data) .flatMap(itemStateTask(ItemState.Created)) diff --git a/modules/joex/src/main/scala/docspell/joex/process/LinkProposal.scala b/modules/joex/src/main/scala/docspell/joex/process/LinkProposal.scala index be443b10..f3ded684 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/LinkProposal.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/LinkProposal.scala @@ -57,7 +57,7 @@ object LinkProposal { case Some(a) => val ids = a.values.map(_.ref.id.id) ctx.logger.info( - s"Found many (${a.size}, ${ids}) candidates for ${a.proposalType}. Setting first." + s"Found many (${a.size}, $ids) candidates for ${a.proposalType}. Setting first." ) *> setItemMeta(data.item.id, ctx, a.proposalType, a.values.head.ref.id).map(_ => Result.multiple(mpt) diff --git a/modules/joex/src/main/scala/docspell/joex/process/ReProcessItem.scala b/modules/joex/src/main/scala/docspell/joex/process/ReProcessItem.scala index caf74e26..26c84b12 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/ReProcessItem.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/ReProcessItem.scala @@ -149,14 +149,14 @@ object ReProcessItem { isLastRetry[F].flatMap { case true => processFiles[F](cfg, fts, itemOps, analyser, regexNer, data).attempt - .flatMap({ + .flatMap { case Right(d) => Task.pure(d) case Left(ex) => logWarn[F]( "Processing failed on last retry." 
).andThen(_ => Sync[F].raiseError(ex)) - }) + } case false => processFiles[F](cfg, fts, itemOps, analyser, regexNer, data) } diff --git a/modules/joex/src/main/scala/docspell/joex/process/SetGivenData.scala b/modules/joex/src/main/scala/docspell/joex/process/SetGivenData.scala index 53162af2..77266fa5 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/SetGivenData.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/SetGivenData.scala @@ -67,7 +67,7 @@ object SetGivenData { val tags = (ctx.args.meta.tags.getOrElse(Nil) ++ data.tags ++ data.classifyTags).distinct for { - _ <- ctx.logger.info(s"Set tags from given data: ${tags}") + _ <- ctx.logger.info(s"Set tags from given data: $tags") e <- ops.linkTags(itemId, tags, collective).attempt _ <- e.fold( ex => ctx.logger.warn(s"Error setting tags: ${ex.getMessage}"), diff --git a/modules/joex/src/main/scala/docspell/joex/process/TextExtraction.scala b/modules/joex/src/main/scala/docspell/joex/process/TextExtraction.scala index 0bcc849e..bdb25c48 100644 --- a/modules/joex/src/main/scala/docspell/joex/process/TextExtraction.scala +++ b/modules/joex/src/main/scala/docspell/joex/process/TextExtraction.scala @@ -106,7 +106,7 @@ object TextExtraction { item: ItemData )(ra: RAttachment): F[(RAttachmentMeta, List[String])] = for { - _ <- ctx.logger.debug(s"Extracting text for attachment ${stripAttachmentName(ra)}") + _ <- ctx.logger.debug(s"Extracting text for attachment ${stripAttachmentName(ra)}") dst <- Duration.stopTime[F] fids <- filesToExtract(ctx)(item, ra) res <- extractTextFallback(ctx, cfg, ra, lang)(fids) @@ -158,7 +158,7 @@ object TextExtraction { val extr = Extraction.create[F](ctx.logger, cfg) extractText[F](ctx, extr, lang)(id) - .flatMap({ + .flatMap { case res @ ExtractResult.Success(_, _) => res.some.pure[F] @@ -173,15 +173,14 @@ object TextExtraction { ctx.logger .warn(s"Cannot extract text: ${ex.getMessage}. 
Try with converted file") .flatMap(_ => extractTextFallback[F](ctx, cfg, ra, lang)(rest)) - }) + } } - /** Returns the fileIds to extract text from. First, the source file - * is tried. If that fails, the converted file is tried. + /** Returns the fileIds to extract text from. First, the source file is tried. If that + * fails, the converted file is tried. * - * If the source file is a PDF, then use the converted file. This - * may then already contain the text if ocrmypdf is enabled. If it - * is disabled, both files are the same. + * If the source file is a PDF, then use the converted file. This may then already + * contain the text if ocrmypdf is enabled. If it is disabled, both files are the same. */ private def filesToExtract[F[_]: Sync](ctx: Context[F, _])( item: ItemData, diff --git a/modules/joex/src/main/scala/docspell/joex/routes/JoexRoutes.scala b/modules/joex/src/main/scala/docspell/joex/routes/JoexRoutes.scala index 32fb2930..6311c2a8 100644 --- a/modules/joex/src/main/scala/docspell/joex/routes/JoexRoutes.scala +++ b/modules/joex/src/main/scala/docspell/joex/routes/JoexRoutes.scala @@ -50,7 +50,10 @@ object JoexRoutes { for { optJob <- app.scheduler.getRunning.map(_.find(_.id == id)) optLog <- optJob.traverse(j => app.findLogs(j.id)) - jAndL = for { job <- optJob; log <- optLog } yield mkJobLog(job, log) + jAndL = for { + job <- optJob + log <- optLog + } yield mkJobLog(job, log) resp <- jAndL.map(Ok(_)).getOrElse(NotFound(BasicResult(false, "Not found"))) } yield resp diff --git a/modules/joex/src/main/scala/docspell/joex/scanmailbox/ScanMailboxTask.scala b/modules/joex/src/main/scala/docspell/joex/scanmailbox/ScanMailboxTask.scala index 00314cb9..2c8125e4 100644 --- a/modules/joex/src/main/scala/docspell/joex/scanmailbox/ScanMailboxTask.scala +++ b/modules/joex/src/main/scala/docspell/joex/scanmailbox/ScanMailboxTask.scala @@ -46,7 +46,7 @@ object ScanMailboxTask { userId = ctx.args.account.user imapConn = ctx.args.imapConnection _ <- 
ctx.logger.info( - s"Reading mails for user ${userId.id} from ${imapConn.id}/${folders}" + s"Reading mails for user ${userId.id} from ${imapConn.id}/$folders" ) _ <- importMails(cfg, mailCfg, emil, upload, joex, ctx) } yield () diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/CountingScheme.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/CountingScheme.scala index 294b04a8..e358d6b0 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/CountingScheme.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/CountingScheme.scala @@ -10,11 +10,10 @@ import cats.implicits._ import docspell.common.Priority -/** A counting scheme to indicate a ratio between scheduling high and - * low priority jobs. +/** A counting scheme to indicate a ratio between scheduling high and low priority jobs. * - * For example high=4, low=1 means: ”schedule 4 high priority jobs - * and then 1 low priority job“. + * For example high=4, low=1 means: ”schedule 4 high priority jobs and then 1 low + * priority job“. */ case class CountingScheme(high: Int, low: Int, counter: Int = 0) { diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/JobTask.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/JobTask.scala index 211fde1c..8fe80b15 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/JobTask.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/JobTask.scala @@ -14,14 +14,13 @@ import docspell.common.syntax.all._ import io.circe.Decoder -/** Binds a Task to a name. This is required to lookup the code based - * on the taskName in the RJob data and to execute it given the - * arguments that have to be read from a string. +/** Binds a Task to a name. This is required to lookup the code based on the taskName in + * the RJob data and to execute it given the arguments that have to be read from a + * string. 
* - * Since the scheduler only has a string for the task argument, this - * only works for Task impls that accept a string. There is a - * convenience constructor that uses circe to decode json into some - * type A. + * Since the scheduler only has a string for the task argument, this only works for Task + * impls that accept a string. There is a convenience constructor that uses circe to + * decode json into some type A. */ case class JobTask[F[_]]( name: Ident, diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/JobTaskRegistry.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/JobTaskRegistry.scala index b250d611..ceca03af 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/JobTaskRegistry.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/JobTaskRegistry.scala @@ -8,9 +8,8 @@ package docspell.joex.scheduler import docspell.common.Ident -/** This is a mapping from some identifier to a task. This is used by - * the scheduler to lookup an implementation using the taskName field - * of the RJob database record. +/** This is a mapping from some identifier to a task. This is used by the scheduler to + * lookup an implementation using the taskName field of the RJob database record. */ final class JobTaskRegistry[F[_]](tasks: Map[Ident, JobTask[F]]) { diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicScheduler.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicScheduler.scala index b83aaed8..3b8d81be 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicScheduler.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicScheduler.scala @@ -13,14 +13,12 @@ import fs2.concurrent.SignallingRef import docspell.joexapi.client.JoexClient import docspell.store.queue._ -/** A periodic scheduler takes care to submit periodic tasks to the - * job queue. +/** A periodic scheduler takes care to submit periodic tasks to the job queue. 
 * - * It is run in the background to regularily find a periodic task to - * execute. If the task is due, it will be submitted into the job - * queue where it will be picked up by the scheduler from some joex - * instance. If it is due in the future, a notification is scheduled - * to be received at that time so the task can be looked up again. + * It is run in the background to regularly find a periodic task to execute. If the task + * is due, it will be submitted into the job queue where it will be picked up by the + * scheduler from some joex instance. If it is due in the future, a notification is + * scheduled to be received at that time so the task can be looked up again. */ trait PeriodicScheduler[F[_]] { diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicSchedulerImpl.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicSchedulerImpl.scala index d4a64387..34badfea 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicSchedulerImpl.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/PeriodicSchedulerImpl.scala @@ -53,8 +53,8 @@ final class PeriodicSchedulerImpl[F[_]: Async]( // internal - /** On startup, get all periodic jobs from this scheduler and remove - * the mark, so they get picked up again. + /** On startup, get all periodic jobs from this scheduler and remove the mark, so they + * get picked up again. */ def init: F[Unit] = logError("Error clearing marks")(store.clearMarks(config.name)) @@ -68,7 +68,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( go <- logThrow("Error getting next task")( store .takeNext(config.name, None) - .use({ + .use { case Marked.Found(pj) => logger .fdebug(s"Found periodic task '${pj.subject}/${pj.timer.asString}'") *> @@ -79,7 +79,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( case Marked.NotMarkable => logger.fdebug("Periodic job cannot be marked. 
Trying again.") *> true .pure[F] - }) + } ) } yield go @@ -90,7 +90,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( else ().pure[F] ) .flatMap(if (_) Stream.empty else Stream.eval(cancelNotify *> body)) - .flatMap({ + .flatMap { case true => mainLoop case false => @@ -98,7 +98,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( waiter.discrete.take(2).drain ++ logger.sdebug(s"Notify signal, going into main loop") ++ mainLoop - }) + } } def isTriggered(pj: RPeriodicTask, now: Timestamp): Boolean = @@ -107,7 +107,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( def submitJob(pj: RPeriodicTask): F[Boolean] = store .findNonFinalJob(pj.id) - .flatMap({ + .flatMap { case Some(job) => logger.finfo[F]( s"There is already a job with non-final state '${job.state}' in the queue" @@ -116,7 +116,7 @@ final class PeriodicSchedulerImpl[F[_]: Async]( case None => logger.finfo[F](s"Submitting job for periodic task '${pj.task.id}'") *> pj.toJob.flatMap(queue.insert) *> notifyJoex *> true.pure[F] - }) + } def notifyJoex: F[Unit] = sch.notifyChange *> store.findJoexNodes.flatMap( @@ -145,12 +145,12 @@ final class PeriodicSchedulerImpl[F[_]: Async]( def cancelNotify: F[Unit] = state .modify(_.clearNotify) - .flatMap({ + .flatMap { case Some(fb) => fb.cancel case None => ().pure[F] - }) + } private def logError(msg: => String)(fa: F[Unit]): F[Unit] = fa.attempt.flatMap { @@ -159,12 +159,10 @@ final class PeriodicSchedulerImpl[F[_]: Async]( } private def logThrow[A](msg: => String)(fa: F[A]): F[A] = - fa.attempt - .flatMap({ - case r @ Right(_) => (r: Either[Throwable, A]).pure[F] - case l @ Left(ex) => logger.ferror(ex)(msg).map(_ => (l: Either[Throwable, A])) - }) - .rethrow + fa.attempt.flatMap { + case r @ Right(_) => (r: Either[Throwable, A]).pure[F] + case l @ Left(ex) => logger.ferror(ex)(msg).map(_ => (l: Either[Throwable, A])) + }.rethrow } object PeriodicSchedulerImpl { diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/Scheduler.scala 
b/modules/joex/src/main/scala/docspell/joex/scheduler/Scheduler.scala index 54972263..d0c5e58a 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/Scheduler.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/Scheduler.scala @@ -26,13 +26,11 @@ trait Scheduler[F[_]] { /** Requests to shutdown the scheduler. * - * The scheduler will not take any new jobs from the queue. If - * there are still running jobs, it waits for them to complete. - * when the cancelAll flag is set to true, it cancels all running - * jobs. + * The scheduler will not take any new jobs from the queue. If there are still running + * jobs, it waits for them to complete. when the cancelAll flag is set to true, it + * cancels all running jobs. * - * The returned F[Unit] can be evaluated to wait for all that to - * complete. + * The returned F[Unit] can be evaluated to wait for all that to complete. */ def shutdown(cancelAll: Boolean): F[Unit] diff --git a/modules/joex/src/main/scala/docspell/joex/scheduler/SchedulerImpl.scala b/modules/joex/src/main/scala/docspell/joex/scheduler/SchedulerImpl.scala index 8f5ed9db..087e9c68 100644 --- a/modules/joex/src/main/scala/docspell/joex/scheduler/SchedulerImpl.scala +++ b/modules/joex/src/main/scala/docspell/joex/scheduler/SchedulerImpl.scala @@ -36,8 +36,8 @@ final class SchedulerImpl[F[_]: Async]( private[this] val logger = getLogger - /** On startup, get all jobs in state running from this scheduler - * and put them into waiting state, so they get picked up again. + /** On startup, get all jobs in state running from this scheduler and put them into + * waiting state, so they get picked up again. 
*/ def init: F[Unit] = QJob.runningToWaiting(config.name, store) @@ -132,7 +132,7 @@ final class SchedulerImpl[F[_]: Async]( else ().pure[F] ) .flatMap(if (_) Stream.empty else Stream.eval(body)) - .flatMap({ + .flatMap { case true => mainLoop case false => @@ -140,7 +140,7 @@ final class SchedulerImpl[F[_]: Async]( waiter.discrete.take(2).drain ++ logger.sdebug(s"Notify signal, going into main loop") ++ mainLoop - }) + } } private def executeCancel(job: RJob): F[Unit] = { @@ -214,7 +214,7 @@ final class SchedulerImpl[F[_]: Async]( ): Task[F, String, Unit] = task .mapF(fa => onStart(job) *> logger.fdebug("Starting task now") *> fa) - .mapF(_.attempt.flatMap({ + .mapF(_.attempt.flatMap { case Right(()) => logger.info(s"Job execution successful: ${job.info}") ctx.logger.info("Job execution successful") *> @@ -239,7 +239,7 @@ final class SchedulerImpl[F[_]: Async]( .map(_ => JobState.Stuck: JobState) } } - })) + }) .mapF(_.attempt.flatMap { case Right(jstate) => onFinish(job, jstate) @@ -262,12 +262,12 @@ final class SchedulerImpl[F[_]: Async]( .map(fiber => logger.fdebug(s"Cancelling job ${job.info}") *> fiber.cancel *> - onCancel.attempt.map({ + onCancel.attempt.map { case Right(_) => () case Left(ex) => logger.error(ex)(s"Task's cancelling code failed. 
Job ${job.info}.") () - }) *> + } *> state.modify(_.markCancelled(job)) *> onFinish(job, JobState.Cancelled) *> ctx.logger.warn("Job has been cancelled.") *> diff --git a/modules/joexapi/src/main/scala/docspell/joexapi/client/JoexClient.scala b/modules/joexapi/src/main/scala/docspell/joexapi/client/JoexClient.scala index 56b274ea..19895c76 100644 --- a/modules/joexapi/src/main/scala/docspell/joexapi/client/JoexClient.scala +++ b/modules/joexapi/src/main/scala/docspell/joexapi/client/JoexClient.scala @@ -51,7 +51,7 @@ object JoexClient { if (succ) () else logger.warn( - s"Notifying Joex instance '${base.asString}' returned with failure: ${msg}" + s"Notifying Joex instance '${base.asString}' returned with failure: $msg" ) case Left(ex) => logger.warn( diff --git a/modules/query/shared/src/main/scala/docspell/query/FulltextExtract.scala b/modules/query/shared/src/main/scala/docspell/query/FulltextExtract.scala index 08f82934..4acfd220 100644 --- a/modules/query/shared/src/main/scala/docspell/query/FulltextExtract.scala +++ b/modules/query/shared/src/main/scala/docspell/query/FulltextExtract.scala @@ -14,8 +14,7 @@ import docspell.query.ItemQuery.Expr.NotExpr import docspell.query.ItemQuery.Expr.OrExpr import docspell.query.ItemQuery._ -/** Currently, fulltext in a query is only supported when in "root - * AND" position +/** Currently, fulltext in a query is only supported when in "root AND" position */ object FulltextExtract { @@ -45,15 +44,15 @@ object FulltextExtract { def findFulltext(expr: Expr): Result = lookForFulltext(expr) - /** Extracts the fulltext node from the given expr and returns it - * together with the expr without that node. + /** Extracts the fulltext node from the given expr and returns it together with the expr + * without that node. 
*/ private def lookForFulltext(expr: Expr): Result = expr match { case Expr.Fulltext(ftq) => Result.SuccessNoExpr(ftq) case Expr.AndExpr(inner) => - inner.collect({ case Expr.Fulltext(fq) => fq }) match { + inner.collect { case Expr.Fulltext(fq) => fq } match { case Nil => checkPosition(expr, 0) case e :: Nil => diff --git a/modules/query/shared/src/main/scala/docspell/query/ItemQuery.scala b/modules/query/shared/src/main/scala/docspell/query/ItemQuery.scala index b2a78aa3..974f9d6e 100644 --- a/modules/query/shared/src/main/scala/docspell/query/ItemQuery.scala +++ b/modules/query/shared/src/main/scala/docspell/query/ItemQuery.scala @@ -10,12 +10,10 @@ import cats.data.{NonEmptyList => Nel} import docspell.query.ItemQuery.Attr.{DateAttr, IntAttr, StringAttr} -/** A query evaluates to `true` or `false` given enough details about - * an item. +/** A query evaluates to `true` or `false` given enough details about an item. * - * It may consist of (field,op,value) tuples that specify some checks - * against a specific field of an item using some operator or a - * combination thereof. + * It may consist of (field,op,value) tuples that specify some checks against a specific + * field of an item using some operator or a combination thereof. 
*/ final case class ItemQuery(expr: ItemQuery.Expr, raw: Option[String]) { def findFulltext: FulltextExtract.Result = diff --git a/modules/query/shared/src/main/scala/docspell/query/ParseFailure.scala b/modules/query/shared/src/main/scala/docspell/query/ParseFailure.scala index 5c6013dd..8caf02b3 100644 --- a/modules/query/shared/src/main/scala/docspell/query/ParseFailure.scala +++ b/modules/query/shared/src/main/scala/docspell/query/ParseFailure.scala @@ -45,7 +45,7 @@ object ParseFailure { def render: String = { val opts = expected.mkString(", ") val dots = if (exhaustive) "" else "…" - s"Expected: ${opts}${dots}" + s"Expected: $opts$dots" } } @@ -57,11 +57,11 @@ object ParseFailure { ) private[query] def packMsg(msg: Nel[Message]): Nel[Message] = { - val expectMsg = combineExpected(msg.collect({ case em: ExpectMessage => em })) + val expectMsg = combineExpected(msg.collect { case em: ExpectMessage => em }) .sortBy(_.offset) .headOption - val simpleMsg = msg.collect({ case sm: SimpleMessage => sm }) + val simpleMsg = msg.collect { case sm: SimpleMessage => sm } Nel.fromListUnsafe((simpleMsg ++ expectMsg).sortBy(_.offset)) } @@ -69,13 +69,13 @@ object ParseFailure { private[query] def combineExpected(msg: List[ExpectMessage]): List[ExpectMessage] = msg .groupBy(_.offset) - .map({ case (offset, es) => + .map { case (offset, es) => ExpectMessage( offset, es.flatMap(_.expected).distinct.sorted, es.forall(_.exhaustive) ) - }) + } .toList private[query] def expectationToMsg(e: Parser.Expectation): Message = @@ -89,7 +89,7 @@ object ParseFailure { case InRange(offset, lower, upper) => if (lower == upper) ExpectMessage(offset, List(lower.toString), true) else { - val expect = s"${lower}-${upper}" + val expect = s"$lower-$upper" ExpectMessage(offset, List(expect), true) } diff --git a/modules/query/shared/src/main/scala/docspell/query/internal/ExprUtil.scala b/modules/query/shared/src/main/scala/docspell/query/internal/ExprUtil.scala index 8b15df94..6a6755a0 100644 --- 
a/modules/query/shared/src/main/scala/docspell/query/internal/ExprUtil.scala +++ b/modules/query/shared/src/main/scala/docspell/query/internal/ExprUtil.scala @@ -13,8 +13,8 @@ import docspell.query.ItemQuery._ object ExprUtil { - /** Does some basic transformation, like unfolding nested and trees - * containing one value etc. + /** Does some basic transformation, like unfolding nested and trees containing one value + * etc. */ def reduce(expr: Expr): Expr = expr match { diff --git a/modules/restserver/src/main/scala/docspell/restserver/Config.scala b/modules/restserver/src/main/scala/docspell/restserver/Config.scala index 5454de28..d1fc8c0f 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/Config.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/Config.scala @@ -54,12 +54,12 @@ object Config { lazy val ipParts = ip.split('.') def checkSingle(pattern: String): Boolean = - pattern == ip || (inet.isLoopbackAddress && pattern == "127.0.0.1") || (pattern + pattern == ip || (inet.isLoopbackAddress && pattern == "127.0.0.1") || pattern .split('.') .zip(ipParts) .foldLeft(true) { case (r, (a, b)) => r && (a == "*" || a == b) - }) + } ips.exists(checkSingle) } diff --git a/modules/restserver/src/main/scala/docspell/restserver/RestServer.scala b/modules/restserver/src/main/scala/docspell/restserver/RestServer.scala index 60a33b6e..190ba7b2 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/RestServer.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/RestServer.scala @@ -91,10 +91,10 @@ object RestServer { "usertask/notifydueitems" -> NotifyDueItemsRoutes(cfg, restApp.backend, token), "usertask/scanmailbox" -> ScanMailboxRoutes(restApp.backend, token), "calevent/check" -> CalEventCheckRoutes(), - "fts" -> FullTextIndexRoutes.secured(cfg, restApp.backend, token), - "folder" -> FolderRoutes(restApp.backend, token), - "customfield" -> CustomFieldRoutes(restApp.backend, token), - "clientSettings" -> 
ClientSettingsRoutes(restApp.backend, token) + "fts" -> FullTextIndexRoutes.secured(cfg, restApp.backend, token), + "folder" -> FolderRoutes(restApp.backend, token), + "customfield" -> CustomFieldRoutes(restApp.backend, token), + "clientSettings" -> ClientSettingsRoutes(restApp.backend, token) ) def openRoutes[F[_]: Async](cfg: Config, restApp: RestApp[F]): HttpRoutes[F] = diff --git a/modules/restserver/src/main/scala/docspell/restserver/conv/Conversions.scala b/modules/restserver/src/main/scala/docspell/restserver/conv/Conversions.scala index fb9323b1..c4f5fd64 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/conv/Conversions.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/conv/Conversions.scala @@ -579,7 +579,7 @@ trait Conversions { ) def newSource[F[_]: Sync](s: Source, cid: Ident): F[RSource] = - timeId.map({ case (id, now) => + timeId.map { case (id, now) => RSource( id, cid, @@ -593,7 +593,7 @@ trait Conversions { s.fileFilter, s.language ) - }) + } def changeSource[F[_]](s: Source, coll: Ident): RSource = RSource( @@ -615,9 +615,9 @@ trait Conversions { Equipment(re.eid, re.name, re.created, re.notes, re.use) def newEquipment[F[_]: Sync](e: Equipment, cid: Ident): F[REquipment] = - timeId.map({ case (id, now) => + timeId.map { case (id, now) => REquipment(id, cid, e.name.trim, now, now, e.notes, e.use) - }) + } def changeEquipment[F[_]: Sync](e: Equipment, cid: Ident): F[REquipment] = Timestamp diff --git a/modules/restserver/src/main/scala/docspell/restserver/http4s/ClientRequestInfo.scala b/modules/restserver/src/main/scala/docspell/restserver/http4s/ClientRequestInfo.scala index 5cc1b9be..b361a1ad 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/http4s/ClientRequestInfo.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/http4s/ClientRequestInfo.scala @@ -29,7 +29,7 @@ object ClientRequestInfo { scheme <- NonEmptyList.fromList(getProtocol(req).toList) host <- getHostname(req) port = 
xForwardedPort(req).getOrElse(serverPort) - hostPort = if (port == 80 || port == 443) host else s"${host}:${port}" + hostPort = if (port == 80 || port == 443) host else s"$host:$port" } yield LenientUri(scheme, Some(hostPort), LenientUri.EmptyPath, None, None) def getHostname[F[_]](req: Request[F]): Option[String] = diff --git a/modules/restserver/src/main/scala/docspell/restserver/routes/AdminRoutes.scala b/modules/restserver/src/main/scala/docspell/restserver/routes/AdminRoutes.scala index 6480b443..fded091a 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/routes/AdminRoutes.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/routes/AdminRoutes.scala @@ -56,5 +56,5 @@ object AdminRoutes { private def compareSecret(s1: String)(s2: String): Boolean = s1.length > 0 && s1.length == s2.length && - s1.zip(s2).forall({ case (a, b) => a == b }) + s1.zip(s2).forall { case (a, b) => a == b } } diff --git a/modules/restserver/src/main/scala/docspell/restserver/routes/AttachmentMultiRoutes.scala b/modules/restserver/src/main/scala/docspell/restserver/routes/AttachmentMultiRoutes.scala index a17945f4..559565ce 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/routes/AttachmentMultiRoutes.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/routes/AttachmentMultiRoutes.scala @@ -33,7 +33,7 @@ object AttachmentMultiRoutes extends MultiIdSupport { for { json <- req.as[IdList] attachments <- readIds[F](json.ids) - n <- backend.item.deleteAttachmentMultiple(attachments, user.account.collective) + n <- backend.item.deleteAttachmentMultiple(attachments, user.account.collective) res = BasicResult( n > 0, if (n > 0) "Attachment(s) deleted" else "Attachment deletion failed." 
diff --git a/modules/restserver/src/main/scala/docspell/restserver/routes/ItemMultiRoutes.scala b/modules/restserver/src/main/scala/docspell/restserver/routes/ItemMultiRoutes.scala index 77af7851..1bc1fd58 100644 --- a/modules/restserver/src/main/scala/docspell/restserver/routes/ItemMultiRoutes.scala +++ b/modules/restserver/src/main/scala/docspell/restserver/routes/ItemMultiRoutes.scala @@ -109,16 +109,16 @@ object ItemMultiRoutes extends MultiIdSupport { for { json <- req.as[ItemsAndRef] items <- readIds[F](json.items) - res <- backend.item.setFolderMultiple(items, json.ref, user.account.collective) - resp <- Ok(Conversions.basicResult(res, "Folder updated")) + res <- backend.item.setFolderMultiple(items, json.ref, user.account.collective) + resp <- Ok(Conversions.basicResult(res, "Folder updated")) } yield resp case req @ PUT -> Root / "direction" => for { json <- req.as[ItemsAndDirection] items <- readIds[F](json.items) - res <- backend.item.setDirection(items, json.direction, user.account.collective) - resp <- Ok(Conversions.basicResult(res, "Direction updated")) + res <- backend.item.setDirection(items, json.direction, user.account.collective) + resp <- Ok(Conversions.basicResult(res, "Direction updated")) } yield resp case req @ PUT -> Root / "date" => diff --git a/modules/store/src/main/scala/docspell/store/impl/DoobieMeta.scala b/modules/store/src/main/scala/docspell/store/impl/DoobieMeta.scala index 09c01f38..c9cd15fa 100644 --- a/modules/store/src/main/scala/docspell/store/impl/DoobieMeta.scala +++ b/modules/store/src/main/scala/docspell/store/impl/DoobieMeta.scala @@ -22,12 +22,12 @@ import io.circe.{Decoder, Encoder} trait DoobieMeta extends EmilDoobieMeta { - implicit val sqlLogging = LogHandler({ + implicit val sqlLogging = LogHandler { case e @ Success(_, _, _, _) => DoobieMeta.logger.trace("SQL " + e) case e => DoobieMeta.logger.error(s"SQL Failure: $e") - }) + } def jsonMeta[A](implicit d: Decoder[A], e: Encoder[A]): Meta[A] = 
Meta[String].imap(str => str.parseJsonAs[A].fold(ex => throw ex, identity))(a => diff --git a/modules/store/src/main/scala/docspell/store/migrate/FlywayMigrate.scala b/modules/store/src/main/scala/docspell/store/migrate/FlywayMigrate.scala index 57783ccd..f80c5c3b 100644 --- a/modules/store/src/main/scala/docspell/store/migrate/FlywayMigrate.scala +++ b/modules/store/src/main/scala/docspell/store/migrate/FlywayMigrate.scala @@ -22,7 +22,7 @@ object FlywayMigrate { logger.info("Running db migrations...") val locations = jdbc.dbmsName match { case Some(dbtype) => - List(s"classpath:db/migration/${dbtype}") + List(s"classpath:db/migration/$dbtype") case None => logger.warn( s"Cannot read database name from jdbc url: ${jdbc.url}. Go with H2" diff --git a/modules/store/src/main/scala/docspell/store/qb/FromExpr.scala b/modules/store/src/main/scala/docspell/store/qb/FromExpr.scala index ff35dd94..d894fe82 100644 --- a/modules/store/src/main/scala/docspell/store/qb/FromExpr.scala +++ b/modules/store/src/main/scala/docspell/store/qb/FromExpr.scala @@ -22,13 +22,12 @@ sealed trait FromExpr { def leftJoin(sel: Select, alias: String, on: Condition): Joined = leftJoin(Relation.SubSelect(sel, alias), on) - /** Prepends the given from expression to existing joins. It will - * replace the current [[FromExpr.From]] value. + /** Prepends the given from expression to existing joins. It will replace the current + * [[FromExpr.From]] value. * - * If this is a [[FromExpr.From]], it is replaced by the given - * expression. If this is a [[FromExpr.Joined]] then the given - * expression replaces the current `From` and the joins are - * prepended to the existing joins. + * If this is a [[FromExpr.From]], it is replaced by the given expression. If this is a + * [[FromExpr.Joined]] then the given expression replaces the current `From` and the + * joins are prepended to the existing joins. 
 */ def prepend(fe: FromExpr): FromExpr } diff --git a/modules/store/src/main/scala/docspell/store/queries/QAttachment.scala b/modules/store/src/main/scala/docspell/store/queries/QAttachment.scala index 81674e8c..0eaa0814 100644 --- a/modules/store/src/main/scala/docspell/store/queries/QAttachment.scala +++ b/modules/store/src/main/scala/docspell/store/queries/QAttachment.scala @@ -46,9 +46,8 @@ object QAttachment { .foldMonoid } - /** Deletes an attachment, its related source and meta data records. - * It will only delete an related archive file, if this is the last - * attachment in that archive. + /** Deletes an attachment, its related source and meta data records. It will only delete + * a related archive file, if this is the last attachment in that archive. */ def deleteSingleAttachment[F[_]: Sync]( store: Store[F] @@ -77,9 +77,9 @@ object QAttachment { } yield n + k + f } - /** This deletes the attachment and *all* its related files. This used - * when deleting an item and should not be used to delete a - * *single* attachment where the item should stay. + /** This deletes the attachment and *all* its related files. This is used when deleting + * an item and should not be used to delete a *single* attachment where the item should + * stay. */ private def deleteAttachment[F[_]: Sync](store: Store[F])(ra: RAttachment): F[Int] = for { diff --git a/modules/store/src/main/scala/docspell/store/queries/QItem.scala b/modules/store/src/main/scala/docspell/store/queries/QItem.scala index 402c9612..5b860961 100644 --- a/modules/store/src/main/scala/docspell/store/queries/QItem.scala +++ b/modules/store/src/main/scala/docspell/store/queries/QItem.scala @@ -368,8 +368,8 @@ object QItem { from.query[ListItem].stream } - /** Same as `findItems` but resolves the tags for each item. 
Note that this is + * implemented by running an additional query per item. */ def findItemsWithTags( collective: Ident, diff --git a/modules/store/src/main/scala/docspell/store/queries/QJob.scala b/modules/store/src/main/scala/docspell/store/queries/QJob.scala index 2973a667..3853d6eb 100644 --- a/modules/store/src/main/scala/docspell/store/queries/QJob.scala +++ b/modules/store/src/main/scala/docspell/store/queries/QJob.scala @@ -43,14 +43,14 @@ object QJob { else ().pure[F] } .find(_.isRight) - .flatMap({ + .flatMap { case Right(job) => Stream.emit(job) case Left(_) => Stream .eval(logger.fwarn[F]("Cannot mark job, even after retrying. Give up.")) .map(_ => None) - }) + } .compile .last .map(_.flatten) diff --git a/modules/store/src/main/scala/docspell/store/queries/QOrganization.scala b/modules/store/src/main/scala/docspell/store/queries/QOrganization.scala index b223d2df..09acc340 100644 --- a/modules/store/src/main/scala/docspell/store/queries/QOrganization.scala +++ b/modules/store/src/main/scala/docspell/store/queries/QOrganization.scala @@ -43,10 +43,10 @@ object QOrganization { .query[(ROrganization, Option[RContact])] .stream .groupAdjacentBy(_._1) - .map({ case (ro, chunk) => + .map { case (ro, chunk) => val cs = chunk.toVector.flatMap(_._2) (ro, cs) - }) + } } def getOrgAndContact( @@ -63,10 +63,10 @@ object QOrganization { .query[(ROrganization, Option[RContact])] .stream .groupAdjacentBy(_._1) - .map({ case (ro, chunk) => + .map { case (ro, chunk) => val cs = chunk.toVector.flatMap(_._2) (ro, cs) - }) + } .compile .last } @@ -91,11 +91,11 @@ object QOrganization { .query[(RPerson, Option[ROrganization], Option[RContact])] .stream .groupAdjacentBy(_._1) - .map({ case (rp, chunk) => + .map { case (rp, chunk) => val cs = chunk.toVector.flatMap(_._3) val ro = chunk.map(_._2).head.flatten (rp, ro, cs) - }) + } } def getPersonAndContact( @@ -115,11 +115,11 @@ object QOrganization { .query[(RPerson, Option[ROrganization], Option[RContact])] .stream 
.groupAdjacentBy(_._1) - .map({ case (rp, chunk) => + .map { case (rp, chunk) => val cs = chunk.toVector.flatMap(_._3) val ro = chunk.map(_._2).head.flatten (rp, ro, cs) - }) + } .compile .last } diff --git a/modules/store/src/main/scala/docspell/store/queries/QueryWildcard.scala b/modules/store/src/main/scala/docspell/store/queries/QueryWildcard.scala index dbcdf07f..83abdf48 100644 --- a/modules/store/src/main/scala/docspell/store/queries/QueryWildcard.scala +++ b/modules/store/src/main/scala/docspell/store/queries/QueryWildcard.scala @@ -31,5 +31,5 @@ object QueryWildcard { def addAtEnd(s: String): String = if (s.endsWith("*")) atEnd(s) - else s"${s}%" + else s"$s%" } diff --git a/modules/store/src/main/scala/docspell/store/queue/JobQueue.scala b/modules/store/src/main/scala/docspell/store/queue/JobQueue.scala index 5a641d4b..9d09d56e 100644 --- a/modules/store/src/main/scala/docspell/store/queue/JobQueue.scala +++ b/modules/store/src/main/scala/docspell/store/queue/JobQueue.scala @@ -19,14 +19,13 @@ import org.log4s._ trait JobQueue[F[_]] { - /** Inserts the job into the queue to get picked up as soon as - * possible. The job must have a new unique id. + /** Inserts the job into the queue to get picked up as soon as possible. The job must + * have a new unique id. */ def insert(job: RJob): F[Unit] - /** Inserts the job into the queue only, if there is no job with the - * same tracker-id running at the moment. The job id must be a new - * unique id. + /** Inserts the job into the queue only, if there is no job with the same tracker-id + * running at the moment. The job id must be a new unique id. * * If the job has no tracker defined, it is simply inserted. 
*/ diff --git a/modules/store/src/main/scala/docspell/store/queue/PeriodicTaskStore.scala b/modules/store/src/main/scala/docspell/store/queue/PeriodicTaskStore.scala index f7b8f8f6..8b8c57f7 100644 --- a/modules/store/src/main/scala/docspell/store/queue/PeriodicTaskStore.scala +++ b/modules/store/src/main/scala/docspell/store/queue/PeriodicTaskStore.scala @@ -19,12 +19,10 @@ import org.log4s.getLogger trait PeriodicTaskStore[F[_]] { - /** Get the free periodic task due next and reserve it to the given - * worker. + /** Get the free periodic task due next and reserve it to the given worker. * - * If found, the task is returned and resource finalization takes - * care of unmarking the task after use and updating `nextRun` with - * the next timestamp. + * If found, the task is returned and resource finalization takes care of unmarking the + * task after use and updating `nextRun` with the next timestamp. */ def takeNext( worker: Ident, @@ -69,10 +67,10 @@ object PeriodicTaskStore { Marked.notFound.pure[F] } - Resource.make(chooseNext)({ + Resource.make(chooseNext) { case Marked.Found(pj) => unmark(pj) case _ => ().pure[F] - }) + } } def getNext(excl: Option[Ident]): F[Option[RPeriodicTask]] = diff --git a/modules/store/src/main/scala/docspell/store/records/RAttachmentArchive.scala b/modules/store/src/main/scala/docspell/store/records/RAttachmentArchive.scala index 6c85972c..defc17d0 100644 --- a/modules/store/src/main/scala/docspell/store/records/RAttachmentArchive.scala +++ b/modules/store/src/main/scala/docspell/store/records/RAttachmentArchive.scala @@ -17,8 +17,8 @@ import bitpeace.FileMeta import doobie._ import doobie.implicits._ -/** The archive file of some attachment. The `id` is shared with the - * attachment, to create a 0..1-1 relationship. +/** The archive file of some attachment. The `id` is shared with the attachment, to create + * a 0..1-1 relationship. 
*/ case class RAttachmentArchive( id: Ident, //same as RAttachment.id @@ -113,9 +113,8 @@ object RAttachmentArchive { ).orderBy(b.position.asc).build.query[(RAttachmentArchive, FileMeta)].to[Vector] } - /** If the given attachment id has an associated archive, this returns - * the number of all associated attachments. Returns 0 if there is - * no archive for the given attachment. + /** If the given attachment id has an associated archive, this returns the number of all + * associated attachments. Returns 0 if there is no archive for the given attachment. */ def countEntries(attachId: Ident): ConnectionIO[Int] = Select( diff --git a/modules/store/src/main/scala/docspell/store/records/RAttachmentPreview.scala b/modules/store/src/main/scala/docspell/store/records/RAttachmentPreview.scala index f70100b7..e423f14a 100644 --- a/modules/store/src/main/scala/docspell/store/records/RAttachmentPreview.scala +++ b/modules/store/src/main/scala/docspell/store/records/RAttachmentPreview.scala @@ -16,8 +16,8 @@ import bitpeace.FileMeta import doobie._ import doobie.implicits._ -/** A preview image of an attachment. The `id` is shared with the - * attachment, to create a 1-1 (or 0..1-1) relationship. +/** A preview image of an attachment. The `id` is shared with the attachment, to create a + * 1-1 (or 0..1-1) relationship. */ case class RAttachmentPreview( id: Ident, //same as RAttachment.id diff --git a/modules/store/src/main/scala/docspell/store/records/RAttachmentSource.scala b/modules/store/src/main/scala/docspell/store/records/RAttachmentSource.scala index 609b781d..a70891c9 100644 --- a/modules/store/src/main/scala/docspell/store/records/RAttachmentSource.scala +++ b/modules/store/src/main/scala/docspell/store/records/RAttachmentSource.scala @@ -16,8 +16,8 @@ import bitpeace.FileMeta import doobie._ import doobie.implicits._ -/** The origin file of an attachment. The `id` is shared with the - * attachment, to create a 1-1 (or 0..1-1) relationship. 
+/** The origin file of an attachment. The `id` is shared with the attachment, to create a + * 1-1 (or 0..1-1) relationship. */ case class RAttachmentSource( id: Ident, //same as RAttachment.id diff --git a/modules/store/src/main/scala/docspell/store/records/RClassifierSetting.scala b/modules/store/src/main/scala/docspell/store/records/RClassifierSetting.scala index 424ee2fb..4fceff87 100644 --- a/modules/store/src/main/scala/docspell/store/records/RClassifierSetting.scala +++ b/modules/store/src/main/scala/docspell/store/records/RClassifierSetting.scala @@ -87,10 +87,9 @@ object RClassifierSetting { def delete(coll: Ident): ConnectionIO[Int] = DML.delete(T, T.cid === coll) - /** Finds tag categories that exist and match the classifier setting. - * If the setting contains a black list, they are removed from the - * existing categories. If it is a whitelist, the intersection is - * returned. + /** Finds tag categories that exist and match the classifier setting. If the setting + * contains a black list, they are removed from the existing categories. If it is a + * whitelist, the intersection is returned. */ def getActiveCategories(coll: Ident): ConnectionIO[List[String]] = (for { @@ -104,7 +103,9 @@ object RClassifierSetting { } } yield res).getOrElse(Nil) - /** Checks the json array of tag categories and removes those that are not present anymore. */ + /** Checks the json array of tag categories and removes those that are not present + * anymore. 
+ */ def fixCategoryList(coll: Ident): ConnectionIO[Int] = (for { sett <- OptionT(findById(coll)) diff --git a/modules/store/src/main/scala/docspell/store/records/RContact.scala b/modules/store/src/main/scala/docspell/store/records/RContact.scala index 12833aac..fc0caa97 100644 --- a/modules/store/src/main/scala/docspell/store/records/RContact.scala +++ b/modules/store/src/main/scala/docspell/store/records/RContact.scala @@ -35,7 +35,7 @@ object RContact { val personId = Column[Ident]("pid", this) val orgId = Column[Ident]("oid", this) val created = Column[Timestamp]("created", this) - val all = NonEmptyList.of[Column[_]](contactId, value, kind, personId, orgId, created) + val all = NonEmptyList.of[Column[_]](contactId, value, kind, personId, orgId, created) } private val T = Table(None) diff --git a/modules/store/src/main/scala/docspell/store/records/RFolder.scala b/modules/store/src/main/scala/docspell/store/records/RFolder.scala index f4e48a39..36ecb3ec 100644 --- a/modules/store/src/main/scala/docspell/store/records/RFolder.scala +++ b/modules/store/src/main/scala/docspell/store/records/RFolder.scala @@ -88,7 +88,7 @@ object RFolder { case Some(id) => id.pure[ConnectionIO] case None => Sync[ConnectionIO].raiseError( - new Exception(s"No folder found for: id=${folderId.id} or name=${name}") + new Exception(s"No folder found for: id=${folderId.id} or name=$name") ) } } diff --git a/modules/store/src/main/scala/docspell/store/records/RItemProposal.scala b/modules/store/src/main/scala/docspell/store/records/RItemProposal.scala index a30311f2..2f87fc6c 100644 --- a/modules/store/src/main/scala/docspell/store/records/RItemProposal.scala +++ b/modules/store/src/main/scala/docspell/store/records/RItemProposal.scala @@ -30,7 +30,7 @@ object RItemProposal { val classifyProposals = Column[MetaProposalList]("classifier_proposals", this) val classifyTags = Column[List[IdRef]]("classifier_tags", this) val created = Column[Timestamp]("created", this) - val all = 
NonEmptyList.of[Column[_]](itemId, classifyProposals, classifyTags, created) + val all = NonEmptyList.of[Column[_]](itemId, classifyProposals, classifyTags, created) } val T = Table(None) diff --git a/modules/store/src/main/scala/docspell/store/records/RNode.scala b/modules/store/src/main/scala/docspell/store/records/RNode.scala index 623cd482..fe2a7da5 100644 --- a/modules/store/src/main/scala/docspell/store/records/RNode.scala +++ b/modules/store/src/main/scala/docspell/store/records/RNode.scala @@ -40,7 +40,7 @@ object RNode { val updated = Column[Timestamp]("updated", this) val created = Column[Timestamp]("created", this) val notFound = Column[Int]("not_found", this) - val all = NonEmptyList.of[Column[_]](id, nodeType, url, updated, created, notFound) + val all = NonEmptyList.of[Column[_]](id, nodeType, url, updated, created, notFound) } def as(alias: String): Table = diff --git a/modules/store/src/main/scala/docspell/store/records/RPeriodicTask.scala b/modules/store/src/main/scala/docspell/store/records/RPeriodicTask.scala index e4a3b8b0..7612900f 100644 --- a/modules/store/src/main/scala/docspell/store/records/RPeriodicTask.scala +++ b/modules/store/src/main/scala/docspell/store/records/RPeriodicTask.scala @@ -20,9 +20,8 @@ import doobie._ import doobie.implicits._ import io.circe.Encoder -/** A periodic task is a special job description, that shares a few - * properties of a `RJob`. It must provide all information to create - * a `RJob` value eventually. +/** A periodic task is a special job description, that shares a few properties of a + * `RJob`. It must provide all information to create a `RJob` value eventually. 
*/ case class RPeriodicTask( id: Ident, diff --git a/modules/store/src/main/scala/docspell/store/records/TagItemName.scala b/modules/store/src/main/scala/docspell/store/records/TagItemName.scala index 8eeb2f4f..2026f217 100644 --- a/modules/store/src/main/scala/docspell/store/records/TagItemName.scala +++ b/modules/store/src/main/scala/docspell/store/records/TagItemName.scala @@ -12,8 +12,8 @@ import docspell.common._ import docspell.store.qb.DSL._ import docspell.store.qb.{Condition, Select} -/** A helper class combining information from `RTag` and `RTagItem`. - * This is not a "record", there is no corresponding table. +/** A helper class combining information from `RTag` and `RTagItem`. This is not a + * "record", there is no corresponding table. */ case class TagItemName( tagId: Ident, diff --git a/modules/store/src/main/scala/docspell/store/usertask/UserTaskScope.scala b/modules/store/src/main/scala/docspell/store/usertask/UserTaskScope.scala index 464e07d1..651b0bf5 100644 --- a/modules/store/src/main/scala/docspell/store/usertask/UserTaskScope.scala +++ b/modules/store/src/main/scala/docspell/store/usertask/UserTaskScope.scala @@ -17,8 +17,8 @@ sealed trait UserTaskScope { self: Product => def fold[A](fa: AccountId => A, fb: Ident => A): A - /** Maps to the account or uses the collective for both parts if the - * scope is collective wide. + /** Maps to the account or uses the collective for both parts if the scope is collective + * wide. 
*/ private[usertask] def toAccountId: AccountId = AccountId(collective, fold(_.user, identity)) diff --git a/modules/store/src/main/scala/docspell/store/usertask/UserTaskStore.scala b/modules/store/src/main/scala/docspell/store/usertask/UserTaskStore.scala index 1bb53794..7e9e7d01 100644 --- a/modules/store/src/main/scala/docspell/store/usertask/UserTaskStore.scala +++ b/modules/store/src/main/scala/docspell/store/usertask/UserTaskStore.scala @@ -17,21 +17,18 @@ import docspell.store.{AddResult, Store} import io.circe._ -/** User tasks are `RPeriodicTask`s that can be managed by the user. - * The user can change arguments, enable/disable it or run it just - * once. +/** User tasks are `RPeriodicTask`s that can be managed by the user. The user can change + * arguments, enable/disable it or run it just once. * - * This class defines methods at a higher level, dealing with - * `UserTask` and `UserTaskScope` instead of directly using - * `RPeriodicTask`. A user task is associated to a specific user (not - * just the collective). But it can be associated to the whole - * collective by using the collective as submitter, too. This is - * abstracted in `UserTaskScope`. + * This class defines methods at a higher level, dealing with `UserTask` and + * `UserTaskScope` instead of directly using `RPeriodicTask`. A user task is associated + * to a specific user (not just the collective). But it can be associated to the whole + * collective by using the collective as submitter, too. This is abstracted in + * `UserTaskScope`. * - * implNote: The mapping is as follows: The collective is the task - * group. The submitter property contains the username. Once a task - * is saved to the database, it can only be referenced uniquely by its - * id. A user may submit multiple same tasks (with different + * implNote: The mapping is as follows: The collective is the task group. The submitter + * property contains the username. 
Once a task is saved to the database, it can only be + * referenced uniquely by its id. A user may submit multiple same tasks (with different * properties). */ trait UserTaskStore[F[_]] { @@ -40,13 +37,13 @@ trait UserTaskStore[F[_]] { */ def getAll(scope: UserTaskScope): Stream[F, UserTask[String]] - /** Return all tasks of the given name and user. The task's arguments - * are returned as stored in the database. + /** Return all tasks of the given name and user. The task's arguments are returned as + * stored in the database. */ def getByNameRaw(scope: UserTaskScope, name: Ident): Stream[F, UserTask[String]] - /** Return all tasks of the given name and user. The task's arguments - * are decoded using the given json decoder. + /** Return all tasks of the given name and user. The task's arguments are decoded using + * the given json decoder. */ def getByName[A](scope: UserTaskScope, name: Ident)(implicit D: Decoder[A] @@ -57,9 +54,8 @@ trait UserTaskStore[F[_]] { /** Updates or inserts the given task. * - * The task is identified by its id. If no task with this id - * exists, a new one is created. Otherwise the existing task is - * updated. + * The task is identified by its id. If no task with this id exists, a new one is + * created. Otherwise the existing task is updated. */ def updateTask[A](scope: UserTaskScope, subject: Option[String], ut: UserTask[A])( implicit E: Encoder[A] @@ -69,15 +65,13 @@ trait UserTaskStore[F[_]] { */ def deleteTask(scope: UserTaskScope, id: Ident): F[Int] - /** Return the task of the given user and name. If multiple exists, an - * error is returned. The task's arguments are returned as stored - * in the database. + /** Return the task of the given user and name. If multiple exists, an error is + * returned. The task's arguments are returned as stored in the database. */ def getOneByNameRaw(scope: UserTaskScope, name: Ident): OptionT[F, UserTask[String]] - /** Return the task of the given user and name. 
If multiple exists, an - * error is returned. The task's arguments are decoded using the - * given json decoder. + /** Return the task of the given user and name. If multiple exists, an error is + * returned. The task's arguments are decoded using the given json decoder. */ def getOneByName[A](scope: UserTaskScope, name: Ident)(implicit D: Decoder[A] @@ -85,14 +79,12 @@ trait UserTaskStore[F[_]] { /** Updates or inserts the given task. * - * Unlike `updateTask`, this ensures that there is at most one task - * of some name in the db. Multiple same tasks (task with same - * name) may not be allowed to run, depending on what they do. - * This is not ensured by the database, though. + * Unlike `updateTask`, this ensures that there is at most one task of some name in the + * db. Multiple same tasks (task with same name) may not be allowed to run, depending + * on what they do. This is not ensured by the database, though. * - * If there are currently multiple tasks with same name as `ut` for - * the user `account`, they will all be removed and the given task - * inserted! + * If there are currently multiple tasks with same name as `ut` for the user `account`, + * they will all be removed and the given task inserted! 
*/ def updateOneTask[A](scope: UserTaskScope, subject: Option[String], ut: UserTask[A])( implicit E: Encoder[A] @@ -155,7 +147,7 @@ object UserTaskStore { .flatMap { case Nil => (None: Option[UserTask[String]]).pure[F] case ut :: Nil => ut.some.pure[F] - case _ => Async[F].raiseError(new Exception("More than one result found")) + case _ => Async[F].raiseError(new Exception("More than one result found")) } ) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 5f87723c..d0a4d4d5 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -271,8 +271,8 @@ object Dependencies { "org.typelevel" %% "munit-cats-effect-3" % MUnitCatsEffectVersion ) - val kindProjectorPlugin = "org.typelevel" %% "kind-projector" % KindProjectorVersion - val betterMonadicFor = "com.olegpy" %% "better-monadic-for" % BetterMonadicForVersion + val kindProjectorPlugin = "org.typelevel" %% "kind-projector" % KindProjectorVersion + val betterMonadicFor = "com.olegpy" %% "better-monadic-for" % BetterMonadicForVersion val webjars = Seq( "org.webjars" % "swagger-ui" % SwaggerUIVersion, diff --git a/project/NerModelsPlugin.scala b/project/NerModelsPlugin.scala index 8d8fbb2c..50be5c79 100644 --- a/project/NerModelsPlugin.scala +++ b/project/NerModelsPlugin.scala @@ -3,17 +3,16 @@ package docspell.build import sbt.{Def, _} import sbt.Keys._ -/** Take some files from dependencies and put them into the resources - * of a local sbt project. +/** Take some files from dependencies and put them into the resources of a local sbt + * project. * - * The reason is that the stanford ner model files are very very - * large: the jar file for the english models is about 1G and the jar - * file for the german models is about 170M. But I only need one file - * that is about 60M from each jar. So just for the sake to save 1GB - * file size when packaging docspell, this ugly plugin exists…. 
+ * The reason is that the stanford ner model files are very very large: the jar file for + * the english models is about 1G and the jar file for the german models is about 170M. + * But I only need one file that is about 60M from each jar. So just for the sake to save + * 1GB file size when packaging docspell, this ugly plugin exists…. * - * The jar files to filter must be added to the libraryDependencies - * in config "NerModels". + * The jar files to filter must be added to the libraryDependencies in config + * "NerModels". */ object NerModelsPlugin extends AutoPlugin { diff --git a/project/StylesPlugin.scala b/project/StylesPlugin.scala index ad8671d7..83b551f5 100644 --- a/project/StylesPlugin.scala +++ b/project/StylesPlugin.scala @@ -6,9 +6,8 @@ import scala.sys.process._ /** Integrates the tailwind build into sbt. * - * It assumes the required config (postcss.conf.js, - * tailwind.config.js) files in the base directory. It requires to - * have nodejs installed and the npx command available (or + * It assumes the required config (postcss.conf.js, tailwind.config.js) files in the base + * directory. It requires to have nodejs installed and the npx command available (or * configured). 
*/ object StylesPlugin extends AutoPlugin { @@ -43,7 +42,7 @@ object StylesPlugin extends AutoPlugin { "META-INF" / "resources" / "webjars" / name.value / version.value, stylesNpxCommand := "npx", stylesNpmCommand := "npm", - stylesMode := StylesMode.Dev, + stylesMode := StylesMode.Dev, stylesBuild := { val logger = streams.value.log val npx = stylesNpxCommand.value @@ -98,7 +97,7 @@ object StylesPlugin extends AutoPlugin { Seq( npx, "postcss", - s"${inDir}/*.css", + s"$inDir/*.css", "-o", target.absolutePath, "--env", diff --git a/project/ZolaPlugin.scala b/project/ZolaPlugin.scala index fab7c076..9051f94b 100644 --- a/project/ZolaPlugin.scala +++ b/project/ZolaPlugin.scala @@ -28,9 +28,9 @@ object ZolaPlugin extends AutoPlugin { def zolaSettings: Seq[Setting[_]] = Seq( - zolaRootDir := baseDirectory.value / "site", - zolaOutputDir := target.value / "zola-site", - zolaCommand := "zola", + zolaRootDir := baseDirectory.value / "site", + zolaOutputDir := target.value / "zola-site", + zolaCommand := "zola", zolaTestBaseUrl := "http://localhost:1234", zolaBuild := { val logger = streams.value.log