Adapt to new logging api
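
The pattern repeated throughout this commit: the old code kept a side-effecting org.log4s logger on the companion object and lifted it into the effect type with syntax helpers (fdebug, finfo, ftrace, fwarn, ferror from docspell.common.syntax); the new code summons an effect-aware logger via docspell.logging.getLogger and calls plain debug/info/trace, each already returning F[Unit]. A minimal self-contained sketch of that shape — Logger and getLogger below are stand-ins for illustration, not docspell's actual definitions:

    import cats.effect.Sync
    import cats.syntax.all._

    object LoggingSketch {
      // Stand-in for docspell.logging.Logger[F]: every call returns an effect.
      trait Logger[F[_]] {
        def debug(msg: => String): F[Unit]
        def info(msg: => String): F[Unit]
      }

      // Stand-in for docspell.logging.getLogger[F].
      def getLogger[F[_]: Sync]: Logger[F] = new Logger[F] {
        def debug(msg: => String): F[Unit] = Sync[F].delay(println(s"[debug] $msg"))
        def info(msg: => String): F[Unit] = Sync[F].delay(println(s"[info] $msg"))
      }

      // New-style method body, the shape the diff gives deleteAttachment and
      // friends: summon the logger where F is known, then sequence the calls.
      def work[F[_]: Sync]: F[Unit] = {
        val logger = getLogger[F]
        logger.debug("step 1") *> logger.info("step 2")
      }
    }
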
@@ -9,6 +9,7 @@ package docspell.store.file
 import docspell.common._
 import docspell.files.TikaMimetype
+import docspell.logging.Logger
 
 import binny._
 import scodec.bits.ByteVector
 
@@ -32,7 +32,6 @@ trait FileRepository[F[_]] {
 }
 
 object FileRepository {
-  private[this] val logger = org.log4s.getLogger
 
   def genericJDBC[F[_]: Sync](
       xa: Transactor[F],
@@ -41,7 +40,7 @@ object FileRepository {
   ): FileRepository[F] = {
     val attrStore = new AttributeStore[F](xa)
     val cfg = JdbcStoreConfig("filechunk", chunkSize, BinnyUtils.TikaContentTypeDetect)
-    val log = Logger.log4s[F](logger)
+    val log = docspell.logging.getLogger[F]
     val binStore = GenericJdbcStore[F](ds, BinnyUtils.LoggerAdapter(log), cfg, attrStore)
     val keyFun: FileKey => BinaryId = BinnyUtils.fileKeyToBinaryId
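
In FileRepository the summoned Logger[F] is handed to binny through BinnyUtils.LoggerAdapter. The adapter's internals are not part of this diff; the sketch below only shows the general shape of such a bridge, with hypothetical trait names (DocLogger, BinLogger) standing in for the real interfaces:

    object AdapterSketch {
      // Hypothetical traits standing in for docspell.logging.Logger[F] and
      // for the logger binny expects; the adapter pattern itself is the point.
      trait DocLogger[F[_]] {
        def debug(msg: => String): F[Unit]
        def error(msg: => String): F[Unit]
      }

      trait BinLogger[F[_]] {
        def logDebug(msg: String): F[Unit]
        def logError(msg: String): F[Unit]
      }

      // Forward each binny-side call to the docspell-side logger.
      final case class LoggerAdapter[F[_]](log: DocLogger[F]) extends BinLogger[F] {
        def logDebug(msg: String): F[Unit] = log.debug(msg)
        def logError(msg: String): F[Unit] = log.error(msg)
      }
    }
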
@@ -13,7 +13,6 @@ import cats.implicits._
 import fs2.Stream
 
 import docspell.common._
-import docspell.common.syntax.all._
 import docspell.store.Store
 import docspell.store.qb.DSL._
 import docspell.store.qb._
@@ -22,8 +21,6 @@ import docspell.store.records._
 import doobie._
 
 object QAttachment {
-  private[this] val logger = org.log4s.getLogger
-
   private val a = RAttachment.as("a")
   private val item = RItem.as("i")
   private val am = RAttachmentMeta.as("am")
@@ -79,13 +76,14 @@ object QAttachment {
    * item and should not be used to delete a *single* attachment where the item should
    * stay.
    */
-  private def deleteAttachment[F[_]: Sync](store: Store[F])(ra: RAttachment): F[Int] =
+  private def deleteAttachment[F[_]: Sync](store: Store[F])(ra: RAttachment): F[Int] = {
+    val logger = docspell.logging.getLogger[F]
     for {
-      _ <- logger.fdebug[F](s"Deleting attachment: ${ra.id.id}")
+      _ <- logger.debug(s"Deleting attachment: ${ra.id.id}")
       s <- store.transact(RAttachmentSource.findById(ra.id))
       p <- store.transact(RAttachmentPreview.findById(ra.id))
       n <- store.transact(RAttachment.delete(ra.id))
-      _ <- logger.fdebug[F](
+      _ <- logger.debug(
         s"Deleted $n meta records (source, meta, preview, archive). Deleting binaries now."
       )
       f <-
@@ -96,6 +94,7 @@ object QAttachment {
         .compile
         .foldMonoid
     } yield n + f
+  }
 
   def deleteArchive[F[_]: Sync](store: Store[F])(attachId: Ident): F[Int] =
     (for {
@@ -112,16 +111,18 @@ object QAttachment {
 
   def deleteItemAttachments[F[_]: Sync](
       store: Store[F]
-  )(itemId: Ident, coll: Ident): F[Int] =
+  )(itemId: Ident, coll: Ident): F[Int] = {
+    val logger = docspell.logging.getLogger[F]
     for {
       ras <- store.transact(RAttachment.findByItemAndCollective(itemId, coll))
-      _ <- logger.finfo[F](
+      _ <- logger.info(
         s"Have ${ras.size} attachments to delete. Must first delete archive entries"
       )
       a <- ras.traverse(a => deleteArchive(store)(a.id))
-      _ <- logger.fdebug[F](s"Deleted ${a.sum} archive entries")
+      _ <- logger.debug(s"Deleted ${a.sum} archive entries")
       ns <- ras.traverse(deleteAttachment[F](store))
     } yield ns.sum
+  }
 
   def getMetaProposals(itemId: Ident, coll: Ident): ConnectionIO[MetaProposalList] = {
     val qa = Select(
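
With the logger materialized inside the method body, each log line in QAttachment becomes an ordinary step of the for-comprehension, sequenced with the surrounding database effects instead of firing as a bare side effect. A runnable cats-effect illustration of that sequencing (debug and deleteRecord are placeholders for the logger and store calls above):

    import cats.effect.{IO, IOApp}

    object DeleteDemo extends IOApp.Simple {
      // Placeholder for logger.debug(...): an IO[Unit] that logs.
      def debug(msg: String): IO[Unit] = IO.println(s"[debug] $msg")

      // Placeholder for the store.transact(...) calls above.
      def deleteRecord(id: String): IO[Int] = IO.pure(1)

      def deleteAttachment(id: String): IO[Int] =
        for {
          _ <- debug(s"Deleting attachment: $id")
          n <- deleteRecord(id)
          _ <- debug(s"Deleted $n meta records.")
        } yield n

      val run: IO[Unit] = deleteAttachment("a1").void
    }
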
@@ -14,7 +14,6 @@ import cats.effect.Sync
 import cats.implicits._
 import fs2.Stream
 
-import docspell.common.syntax.all._
 import docspell.common.{FileKey, IdRef, _}
 import docspell.query.ItemQuery
 import docspell.store.Store
@@ -25,10 +24,9 @@ import docspell.store.records._
 
 import doobie.implicits._
 import doobie.{Query => _, _}
-import org.log4s.getLogger
 
 object QItem {
-  private[this] val logger = getLogger
+  private[this] val logger = docspell.logging.getLogger[ConnectionIO]
 
   private val equip = REquipment.as("e")
   private val org = ROrganization.as("o")
@@ -81,7 +79,7 @@ object QItem {
         )
       ]
       .option
-    logger.trace(s"Find item query: $cq")
+    logger.asUnsafe.trace(s"Find item query: $cq")
     val attachs = RAttachment.findByItemWithMeta(id)
     val sources = RAttachmentSource.findByItemWithMeta(id)
     val archives = RAttachmentArchive.findByItemWithMeta(id)
@@ -181,8 +179,8 @@ object QItem {
       .changeWhere(c => c && queryCondition(today, q.fix.account.collective, q.cond))
       .limit(batch)
       .build
-    logger.trace(s"List $batch items: $sql")
-    sql.query[ListItem].stream
+    logger.stream.trace(s"List $batch items: $sql").drain ++
+      sql.query[ListItem].stream
   }
 
   def searchStats(today: LocalDate)(q: Query): ConnectionIO[SearchSummary] =
@@ -359,8 +357,7 @@ object QItem {
     query.attemptSql.flatMap {
       case Right(res) => res.pure[ConnectionIO]
       case Left(ex) =>
-        Logger
-          .log4s[ConnectionIO](logger)
+        logger
           .error(ex)(
             s"Calculating custom field summary failed. You may have invalid custom field values according to their type."
           ) *>
@@ -405,8 +402,8 @@ object QItem {
       .orderBy(Tids.weight.desc)
       .build
 
-    logger.trace(s"fts query: $from")
-    from.query[ListItem].stream
+    logger.stream.trace(s"fts query: $from").drain ++
+      from.query[ListItem].stream
   }
 
   /** Same as `findItems` but resolves the tags for each item. Note that this is
@@ -515,8 +512,8 @@ object QItem {
       excludeFileMeta: Set[FileKey]
   ): ConnectionIO[Vector[RItem]] = {
     val qq = findByChecksumQuery(checksum, collective, excludeFileMeta).build
-    logger.debug(s"FindByChecksum: $qq")
-    qq.query[RItem].to[Vector]
+    logger.debug(s"FindByChecksum: $qq") *>
+      qq.query[RItem].to[Vector]
  }
 
   def findByChecksumQuery(
@@ -695,7 +692,7 @@ object QItem {
 
   private def contentMax(maxLen: Int): SelectExpr =
     if (maxLen <= 0) {
-      logger.debug("Max text length limit disabled")
+      logger.asUnsafe.debug("Max text length limit disabled")
       m.content.s
     } else substring(m.content.s, 0, maxLen).s
 
@@ -703,11 +700,11 @@ object QItem {
       q: Select
   ): ConnectionIO[TextAndTag] =
     for {
-      _ <- logger.ftrace[ConnectionIO](
+      _ <- logger.trace(
         s"query: $q (${itemId.id}, ${collective.id})"
       )
       texts <- q.build.query[(String, Option[TextAndTag.TagName])].to[List]
-      _ <- logger.ftrace[ConnectionIO](
+      _ <- logger.trace(
         s"Got ${texts.size} text and tag entries for item ${itemId.id}"
      )
      tag = texts.headOption.flatMap(_._2)
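
Two new call shapes appear in QItem. logger.asUnsafe apparently drops back to a side-effecting logger for positions that cannot return an effect (such as building a SelectExpr), while logger.stream lifts a log statement into an fs2 Stream so it can be drained and prepended to the query stream; in findByChecksum the log is instead chained with *> as a plain ConnectionIO effect. The stream prepend in isolation, runnable with fs2 and cats-effect (logLine stands in for logger.stream.trace):

    import cats.effect.{IO, IOApp}
    import fs2.Stream

    object StreamLogDemo extends IOApp.Simple {
      // Stand-in for logger.stream.trace(...): a one-element stream of a log effect.
      def logLine(msg: String): Stream[IO, Unit] =
        Stream.eval(IO.println(s"[trace] $msg"))

      def items: Stream[IO, Int] = Stream(1, 2, 3)

      // .drain discards the Unit element, so the result still emits only Ints;
      // the log runs exactly once, when the stream is first pulled.
      def loggedItems: Stream[IO, Int] =
        logLine("listing items").drain ++ items

      val run: IO[Unit] = loggedItems.evalMap(i => IO.println(i)).compile.drain
    }
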
@@ -12,7 +12,6 @@ import cats.implicits._
 import fs2.Stream
 
 import docspell.common._
-import docspell.common.syntax.all._
 import docspell.store.Store
 import docspell.store.qb.DSL._
 import docspell.store.qb._
@@ -20,10 +19,9 @@ import docspell.store.records.{RJob, RJobGroupUse, RJobLog}
 
 import doobie._
 import doobie.implicits._
-import org.log4s._
 
 object QJob {
-  private[this] val logger = getLogger
+  private[this] val cioLogger = docspell.logging.getLogger[ConnectionIO]
 
   def takeNextJob[F[_]: Async](
       store: Store[F]
@@ -31,13 +29,14 @@ object QJob {
       priority: Ident => F[Priority],
       worker: Ident,
       retryPause: Duration
-  ): F[Option[RJob]] =
+  ): F[Option[RJob]] = {
+    val logger = docspell.logging.getLogger[F]
     Stream
       .range(0, 10)
       .evalMap(n => takeNextJob1(store)(priority, worker, retryPause, n))
       .evalTap { x =>
         if (x.isLeft)
-          logger.fdebug[F](
+          logger.debug(
             "Cannot mark job, probably due to concurrent updates. Will retry."
           )
         else ().pure[F]
@@ -48,12 +47,13 @@ object QJob {
           Stream.emit(job)
         case Left(_) =>
           Stream
-            .eval(logger.fwarn[F]("Cannot mark job, even after retrying. Give up."))
+            .eval(logger.warn("Cannot mark job, even after retrying. Give up."))
            .map(_ => None)
      }
      .compile
      .last
      .map(_.flatten)
+  }
 
   private def takeNextJob1[F[_]: Async](store: Store[F])(
       priority: Ident => F[Priority],
@@ -61,6 +61,7 @@ object QJob {
       retryPause: Duration,
       currentTry: Int
   ): F[Either[Unit, Option[RJob]]] = {
+    val logger = docspell.logging.getLogger[F]
     // if this fails, we have to restart takeNextJob
     def markJob(job: RJob): F[Either[Unit, RJob]] =
       store.transact(for {
@@ -68,25 +69,25 @@ object QJob {
         _ <-
           if (n == 1) RJobGroupUse.setGroup(RJobGroupUse(job.group, worker))
           else 0.pure[ConnectionIO]
-        _ <- logger.fdebug[ConnectionIO](
+        _ <- cioLogger.debug(
           s"Scheduled job ${job.info} to worker ${worker.id}"
         )
       } yield if (n == 1) Right(job) else Left(()))
 
     for {
-      _ <- logger.ftrace[F](
+      _ <- logger.trace(
         s"About to take next job (worker ${worker.id}), try $currentTry"
       )
       now <- Timestamp.current[F]
       group <- store.transact(selectNextGroup(worker, now, retryPause))
-      _ <- logger.ftrace[F](s"Choose group ${group.map(_.id)}")
+      _ <- logger.trace(s"Choose group ${group.map(_.id)}")
       prio <- group.map(priority).getOrElse((Priority.Low: Priority).pure[F])
-      _ <- logger.ftrace[F](s"Looking for job of prio $prio")
+      _ <- logger.trace(s"Looking for job of prio $prio")
       job <-
         group
           .map(g => store.transact(selectNextJob(g, prio, retryPause, now)))
           .getOrElse((None: Option[RJob]).pure[F])
-      _ <- logger.ftrace[F](s"Found job: ${job.map(_.info)}")
+      _ <- logger.trace(s"Found job: ${job.map(_.info)}")
       res <- job.traverse(j => markJob(j))
     } yield res.map(_.map(_.some)).getOrElse {
       if (group.isDefined)
@@ -138,7 +139,7 @@ object QJob {
       .limit(1)
 
     val frag = groups.build
-    logger.trace(
+    cioLogger.trace(
       s"nextGroupQuery: $frag (now=${now.toMillis}, pause=${initialPause.millis})"
     )
 
@@ -206,7 +207,8 @@ object QJob {
       _ <- store.transact(RJob.setRunning(id, workerId, now))
     } yield ()
 
-  def setFinalState[F[_]: Async](id: Ident, state: JobState, store: Store[F]): F[Unit] =
+  def setFinalState[F[_]: Async](id: Ident, state: JobState, store: Store[F]): F[Unit] = {
+    val logger = docspell.logging.getLogger[F]
     state match {
       case JobState.Success =>
         setSuccess(id, store)
@@ -217,8 +219,9 @@ object QJob {
       case JobState.Stuck =>
         setStuck(id, store)
       case _ =>
-        logger.ferror[F](s"Invalid final state: $state.")
+        logger.error(s"Invalid final state: $state.")
     }
+  }
 
   def exceedsRetries[F[_]: Async](id: Ident, max: Int, store: Store[F]): F[Boolean] =
     store.transact(RJob.getRetries(id)).map(n => n.forall(_ >= max))
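
QJob now keeps two loggers: cioLogger, a Logger[ConnectionIO] for code that runs inside store.transact (markJob, selectNextGroup), and a method-local Logger[F] for the outer program. The split matters because anything logged within a transaction must itself be a ConnectionIO value. A small doobie sketch of that idea — FC.delay is doobie's standard lift into ConnectionIO, and the println stands in for a real logger call:

    import doobie._
    import doobie.implicits._
    import doobie.free.{connection => FC}

    object TxLogDemo {
      // A log effect lifted into ConnectionIO sequences with the SQL
      // statements of the same transaction.
      def scheduleJob(job: String, worker: String): ConnectionIO[Int] =
        for {
          n <- sql"update job set worker = $worker where id = $job".update.run
          _ <- FC.delay(println(s"Scheduled job $job to worker $worker"))
        } yield n
    }
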
@@ -16,7 +16,7 @@ import docspell.store.records._
 import doobie._
 
 object QUser {
-  private val logger = Logger.log4s[ConnectionIO](org.log4s.getLogger)
+  private[this] val logger = docspell.logging.getLogger[ConnectionIO]
 
   final case class UserData(
       ownedFolders: List[Ident],
@@ -14,8 +14,6 @@ import docspell.store.Store
 import docspell.store.queries.QJob
 import docspell.store.records.RJob
 
-import org.log4s.getLogger
-
 trait JobQueue[F[_]] {
 
   /** Inserts the job into the queue to get picked up as soon as possible. The job must
@@ -44,7 +42,7 @@ trait JobQueue[F[_]] {
 object JobQueue {
   def apply[F[_]: Async](store: Store[F]): Resource[F, JobQueue[F]] =
     Resource.pure[F, JobQueue[F]](new JobQueue[F] {
-      private[this] val logger = Logger.log4s(getLogger)
+      private[this] val logger = docspell.logging.getLogger[F]
 
       def nextJob(
           prio: Ident => F[Priority],
@@ -10,13 +10,10 @@ import cats.effect._
 import cats.implicits._
 
 import docspell.common._
-import docspell.common.syntax.all._
 import docspell.store.queries.QPeriodicTask
 import docspell.store.records._
 import docspell.store.{AddResult, Store}
 
-import org.log4s.getLogger
-
 trait PeriodicTaskStore[F[_]] {
 
   /** Get the free periodic task due next and reserve it to the given worker.
@@ -44,11 +41,10 @@ trait PeriodicTaskStore[F[_]] {
 }
 
 object PeriodicTaskStore {
-  private[this] val logger = getLogger
-
   def create[F[_]: Sync](store: Store[F]): Resource[F, PeriodicTaskStore[F]] =
     Resource.pure[F, PeriodicTaskStore[F]](new PeriodicTaskStore[F] {
 
+      private[this] val logger = docspell.logging.getLogger[F]
       def takeNext(
           worker: Ident,
           excludeId: Option[Ident]
@@ -91,7 +87,7 @@ object PeriodicTaskStore {
       store
         .transact(QPeriodicTask.clearWorkers(name))
         .flatMap { n =>
-          if (n > 0) logger.finfo(s"Clearing $n periodic tasks from worker ${name.id}")
+          if (n > 0) logger.info(s"Clearing $n periodic tasks from worker ${name.id}")
           else ().pure[F]
         }
 
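
In JobQueue and PeriodicTaskStore the logger moves from the companion object into the anonymous instance built by apply/create. It has to: docspell.logging.getLogger[F] needs the effect type, and F is only in scope inside those methods. A condensed, self-contained sketch of that scoping — Logger, getLogger, and Queue are stand-ins, not docspell's API:

    import cats.effect.{Resource, Sync}

    object ScopingSketch {
      trait Logger[F[_]] { def info(msg: => String): F[Unit] }

      // Stand-in for docspell.logging.getLogger[F].
      def getLogger[F[_]: Sync]: Logger[F] =
        new Logger[F] { def info(msg: => String): F[Unit] = Sync[F].delay(println(msg)) }

      trait Queue[F[_]] { def ping: F[Unit] }

      object Queue {
        // No object-level logger anymore: there is no F[_] in scope out here.
        def apply[F[_]: Sync]: Resource[F, Queue[F]] =
          Resource.pure[F, Queue[F]](new Queue[F] {
            // The anonymous instance fixes F, so the Logger[F] lives here.
            private[this] val logger = getLogger[F]
            def ping: F[Unit] = logger.info("pong")
          })
      }
    }
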
@@ -192,7 +192,7 @@ object RNotificationChannel {
   ): OptionT[ConnectionIO, RNotificationChannel] =
     for {
       time <- OptionT.liftF(Timestamp.current[ConnectionIO])
-      logger = Logger.log4s[ConnectionIO](org.log4s.getLogger)
+      logger = docspell.logging.getLogger[ConnectionIO]
       r <-
         channel match {
           case Channel.Mail(_, name, conn, recipients) =>