# Compare commits

6 commits
Author | SHA1 | Date
---|---|---
Jonas Letzbor | 3843ebab9f |
Jonas Letzbor | 55ad1fbfee |
Jonas Letzbor | bfc1ad40a5 |
Jonas Letzbor | 91f2e1abcb |
Letzbor Jonas | fac781a053 |
Letzbor Jonas | 0b72670917 |
.gitignore

```diff
@@ -21,3 +21,12 @@
 # Go workspace file
 go.work
 
+# Locally used configuration file
+/config.yaml
+/ncConverter.json
+
+# Vite build file
+/.vite
+
+# Log files
+*.log
```
```diff
@@ -0,0 +1,10 @@
+## Run
+
+set GOTMPDIR=C:\MYCOMP
+go run .\cmd\web
+npm run dev
+
+npm run build
+
+
+nodemon --watch './**/*.go' --signal SIGTERM --exec 'go' run cmd/MyProgram/main.go
```
README.md (31 changed lines)
```diff
@@ -1,3 +1,32 @@
 # ncDocConverter
 
-A Go program able to convert Office Documents automatically to PDF / EPUB Files via OnlyOffice
+A Go program able to convert documents automatically to PDF / EPUB files.
+
+Currently, the following sources for documents are supported:
+
+* Nextcloud with OnlyOffice
+* BookStack
+
+Only **Nextcloud** is supported as a destination to save the converted files.
+
+## Setting it up
+
+For using the
+
+### BookStack
+
+To convert books from BookStack you need to create an API token for the user that will access the books:
+1. Log in as admin
+2. Go to *Settings → Users*
+3. Select the user for API access
+4. Scroll down to `API Tokens` and click `CREATE TOKEN`
+5. Set a name and expiry date. Click `Save`
+6. Copy the ID and token. The field `apiToken` will contain the combination `id:token`
+
+Now you also need to create a new role or edit an existing one.
+1. Go to *Settings → Roles*
+2. Edit an existing role (one the user has) or create a new role
+3. Check the boxes `Access system API` and `Export content` under `System permissions`
+4. Assign view permissions *(all and own)* for *Shelves, Books, Chapters and Pages*
```
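The `id:token` pair from step 6 is sent as an `Authorization: Token` header on every request, which is exactly what the worker code further down in this diff does. A minimal standalone sketch (the URL and token below are placeholders, not real credentials):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical values: replace with your instance URL and the id:token pair
	const baseURL = "https://wiki.example.com"
	const apiToken = "tokenId:tokenSecret"

	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/books", nil)
	if err != nil {
		panic(err)
	}
	// BookStack expects the token as "Token <id>:<secret>"
	req.Header.Set("Authorization", "Token "+apiToken)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.Status, string(body))
}
```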
@@ -0,0 +1,73 @@

```go
package main

import (
	"crypto/tls"
	"net/http"
	"time"

	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/internal/ncworker"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

type WebApplication struct {
	logger *logger.Logger
	config *models.WebConfig
}

func main() {
	defer logger.CloseFile()

	config, err := models.SetConfig()
	if err != nil {
		logger.Error(err.Error())
	}

	tlsConfig := &tls.Config{
		CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
		MinVersion:       tls.VersionTLS12,
		CipherSuites: []uint16{
			tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		},
	}

	webApp := WebApplication{
		logger: logger.GetGlobalLogger(),
		config: config,
	}

	srv := &http.Server{
		Addr:         config.Server.Address,
		ErrorLog:     nil,
		Handler:      webApp.routes(),
		TLSConfig:    tlsConfig,
		IdleTimeout:  time.Minute,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
	}

	ncConvertUsers, err := models.ParseConvertUsers("./ncConverter.json")
	if err != nil {
		logger.Error("Unable to parse the file %s: %s", "./ncConverter.json", err)
	}
	ncworker.NewScheduler(ncConvertUsers, config)

	// Development shortcut: the HTTP server below is not started yet
	if 1 == 1 {
		return
	}

	logger.Info("Server started on %s", config.Server.Address)
	var errw error
	if config.Server.Certificate == "" {
		errw = srv.ListenAndServe()
	} else {
		errw = srv.ListenAndServeTLS(config.Server.Certificate+"cert.pem", config.Server.Certificate+"key.pem")
	}

	logger.Error("Failed to run the HTTP server: %s", errw)
}
```
@@ -0,0 +1,68 @@

```go
package main

import (
	"fmt"
	"net/http"
	"runtime/debug"

	"github.com/justinas/nosurf"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

func secureHeaders(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Note: This is split across multiple lines for readability. You don't
		// need to do this in your own code.
		w.Header().Set("Content-Security-Policy",
			//"default-src 'self' localhost:*; style-src 'self' fonts.googleapis.com localhost:*; font-src fonts.gstatic.com")
			"default-src * 'unsafe-inline' 'unsafe-eval'; script-src * 'unsafe-inline' 'unsafe-eval'; connect-src * 'unsafe-inline'; img-src * data: blob: 'unsafe-inline'; frame-src *; style-src * 'unsafe-inline';")
		w.Header().Set("Referrer-Policy", "origin-when-cross-origin")
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.Header().Set("X-Frame-Options", "deny")
		w.Header().Set("X-XSS-Protection", "0")

		next.ServeHTTP(w, r)
	})
}

func (app *WebApplication) logRequest(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		logger.Info("%s - %s %s %s", r.RemoteAddr, r.Proto, r.Method, r.URL.RequestURI())

		next.ServeHTTP(w, r)
	})
}

func (app *WebApplication) recoverPanic(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Create a deferred function (which will always be run in the event
		// of a panic as Go unwinds the stack).
		defer func() {
			// Use the builtin recover function to check if there has been a
			// panic or not. If there has...
			if err := recover(); err != nil {
				// Set a "Connection: close" header on the response.
				w.Header().Set("Connection", "close")
				// Log the error together with the stack trace.
				trace := fmt.Sprintf("%s\n%s", fmt.Errorf("%s", err).Error(), debug.Stack())
				logger.Error(trace)
			}
		}()

		next.ServeHTTP(w, r)
	})
}

// Create a NoSurf middleware function which uses a customized CSRF cookie with
// the Secure, Path and HttpOnly attributes set.
func noSurf(next http.Handler) http.Handler {
	csrfHandler := nosurf.New(next)
	csrfHandler.SetBaseCookie(http.Cookie{
		HttpOnly: true,
		Path:     "/",
		Secure:   true,
	})

	return csrfHandler
}
```
@@ -0,0 +1,20 @@

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"rpjosh.de/ncDocConverter/internal/api"
)

func (app *WebApplication) routes() http.Handler {
	api := api.Api{Logger: app.logger, Config: app.config}

	router := chi.NewRouter()
	router.Use(middleware.RealIP, app.recoverPanic, app.logRequest, secureHeaders)

	api.SetupServer(router)

	return router
}
```
@@ -0,0 +1,19 @@

```yaml
server:
  # Address to listen on
  address: ":4000"

  # Path to the folder with the certificate files (cert.pem and key.pem) for using TLS
  certificate: "/etc/letsencrypt/live/"

  # If this parameter is given, all jobs are executed immediately after starting the program.
  # Afterwards the program exits -> the "execution" fields of the jobs are ignored
  oneShot: false

logging:
  # Minimum log level for printing to the console (debug, info, warning, error, fatal)
  printLogLevel: info
  # Minimum log level for writing into the log file (debug, info, warning, error, fatal)
  writeLogLevel: warning

  # File path to log (empty = disabled)
  logFilePath: "/home/myUser/logs/ncDocConverter.live"
```
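For reference, this file maps onto the `WebConfig`/`Server`/`Logging` structs that appear later in this diff. A minimal sketch of loading it with `gopkg.in/yaml.v3` (the struct shape is copied from that code and inlined here so the snippet is self-contained):

```go
package main

import (
	"fmt"
	"os"

	yaml "gopkg.in/yaml.v3"
)

// Mirrors the WebConfig/Server/Logging structs from internal/models
type config struct {
	Server struct {
		Address     string `yaml:"address"`
		Certificate string `yaml:"certificate"`
		OneShot     bool   `yaml:"oneShot"`
	} `yaml:"server"`
	Logging struct {
		PrintLogLevel string `yaml:"printLogLevel"`
		WriteLogLevel string `yaml:"writeLogLevel"`
		LogFilePath   string `yaml:"logFilePath"`
	} `yaml:"logging"`
}

func main() {
	dat, err := os.ReadFile("./config.yaml")
	if err != nil {
		panic(err)
	}

	var c config
	if err := yaml.Unmarshal(dat, &c); err != nil {
		panic(err)
	}
	fmt.Printf("listening on %s, oneShot=%v\n", c.Server.Address, c.Server.OneShot)
}
```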
@@ -0,0 +1,78 @@

```jsonc
{
    "nextcloudUsers": [
        {
            // Nextcloud user and instance to save the converted files
            "nextcloudUrl": "https://cloud.rpjosh.de",
            "username": "exchange",
            "password": "Zj9cQ-eF3n6-R6DSt-8sJXf-kYseJ",

            // OnlyOffice (docx, xlsx, ...) conversion to PDF
            "jobs": [
                {
                    "jobName": "Convert my books",
                    "sourceDir": "api/",
                    "destinationDir": "ebooks/",

                    // Keep the folder structure of the source.
                    // Otherwise all files will be saved in the destination dir
                    "keepFolders": true,

                    // If the folder should be searched recursively
                    "recursive": true,

                    // Execution date in the cron format (e.g. "45 23 * * 6" = Saturdays at 23:45)
                    "execution": "45 23 * * 6"
                }
            ],

            // Conversion from BookStack to PDF/HTML
            "bookStack": {
                "url": "https://wiki.rpjosh.de",
                "username": "test@rpjosh.de",
                "apiToken": "typCf2LoSQDHicpeeAZQCDZwAq7BvVdl:PcKMZVDrIwEJeKIKyaD7w0cf20JCjpZz",

                "jobs": [
                    {
                        "jobName": "Convert my favorite books",

                        // Shelves to filter -> convert only shelves with the names "Work" and "Linux".
                        // Leave empty to convert books in all shelves
                        "shelves": [ "Work", "Linux" ],
                        // Regex to filter by the shelf name
                        "shelveRegex": "",

                        // Books to filter (see shelves for more information)
                        "books": [],
                        "booksRegex": "",

                        // If books that don't belong to a shelf should also be converted.
                        // They will be placed in the root folder.
                        // Note that the fields "shelves" and "shelveRegex" don't work as expected then
                        // (books inside these shelves won't be excluded but will be placed in the root)
                        "includeBooksWithoutShelve": false,

                        // Destination folder to save the converted documents in Nextcloud
                        "destinationDir": "ebooks/wiki/",

                        // Export format (html or pdf)
                        "format": "html",

                        // If the books should be saved inside the shelf's folder.
                        // Otherwise all files will be saved in the destination dir
                        "keepStructure": true,

                        // Execution date in the cron format
                        "execution": "45 23 * * 6",

                        // Fetching books and shelves can be resource hungry. This value specifies the
                        // number of job runs that are executed with cached data.
                        // Note that new or deleted books and shelves won't be converted until the cache
                        // counter expires. Changes in existing books will still be noticed.
                        // Specify zero to disable the cache
                        "cache": 3
                    }
                ]
            }
        }
    ]
}
```
@@ -0,0 +1,21 @@

```jsonc
{
    "users": [
        {
            "authUser": "Unique Username (from ",
            "nextcloudUrl": "https://cloud.myDomain.de",
            "username": "MyUsername",
            "password": "App Password",

            "jobs": [
                {
                    "jobName": "",
                    "sourceDir": "Arbeit/",
                    "destinationDir": "Ebooks/",
                    "keepFolders": true,
                    "recursive": true,
                    "execution": [ "01:00", "15:00" ]
                }
            ]
        }
    ]
}
```
go.mod (13 changed lines)
```diff
@@ -1,3 +1,16 @@
 module rpjosh.de/ncDocConverter
 
 go 1.18
+
+require (
+	github.com/go-chi/chi/v5 v5.0.8 // indirect
+	github.com/go-co-op/gocron v1.18.0 // indirect
+	github.com/go-yaml/yaml v2.1.0+incompatible // indirect
+	github.com/justinas/nosurf v1.1.1 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
+	golang.org/x/sync v0.1.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+// https://zhwt.github.io/yaml-to-go/
+// https://www.onlinetool.io/xmltogo/
```
go.sum

@@ -0,0 +1,21 @@

```
github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8=
github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-co-op/gocron v1.18.0 h1:SxTyJ5xnSN4byCq7b10LmmszFdxQlSQJod8s3gbnXxA=
github.com/go-co-op/gocron v1.18.0/go.mod h1:sD/a0Aadtw5CpflUJ/lpP9Vfdk979Wl1Sg33HPHg0FY=
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/justinas/nosurf v1.1.1 h1:92Aw44hjSK4MxJeMSyDa7jwuI9GR2J/JCQiaKvXXSlk=
github.com/justinas/nosurf v1.1.1/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62 h1:b2nJXyPCa9HY7giGM+kYcnQ71m14JnGdQabMPmyt++8=
github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
```
@@ -0,0 +1,17 @@

```go
package api

import (
	"github.com/go-chi/chi/v5"

	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

type Api struct {
	Logger *logger.Logger
	Config *models.WebConfig
}

func (api *Api) SetupServer(router *chi.Mux) {
	api.routes(router)
}
```
@@ -0,0 +1,7 @@

```go
package api

import "github.com/go-chi/chi/v5"

func (api *Api) routes(router *chi.Mux) {

}
```
@@ -0,0 +1,38 @@

```go
package models

// BookStack details to fetch books from
type BookStack struct {
	URL      string `json:"url"`
	Username string `json:"username"`
	Token    string `json:"apiToken"`

	Jobs []BookStackJob `json:"jobs"`
}

// A concrete BookStack job
type BookStackJob struct {
	JobName        string `json:"jobName"`
	DestinationDir string `json:"destinationDir"`

	Shelves      []string `json:"shelves"`
	ShelvesRegex string   `json:"shelveRegex"`

	Books      []string `json:"books"`
	BooksRegex string   `json:"booksRegex"`

	IncludeBooksWithoutShelve bool   `json:"includeBooksWithoutShelve"`
	Format                    Format `json:"format"`
	KeepStructure             bool   `json:"keepStructure"`

	Recursive bool   `json:"recursive"`
	Execution string `json:"execution"`

	CacheCount int `json:"cache"`
}

type Format string

const (
	HTML Format = "html"
	PDF  Format = "pdf"
)
```
@@ -0,0 +1,57 @@

```go
package models

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
)

// The root nextcloud user where the files are stored
// and the files for OnlyOffice jobs are defined
type NextcloudUser struct {
	NextcloudBaseUrl string `json:"nextcloudUrl"`
	Username         string `json:"username"`
	Password         string `json:"password"`

	// OnlyOffice
	ConvertJobs []NcConvertJob `json:"jobs"`

	// BookStack
	BookStack BookStack `json:"bookStack"`
}

// An OnlyOffice docs convert job
type NcConvertJob struct {
	JobName        string `json:"jobName"`
	SourceDir      string `json:"sourceDir"`
	DestinationDir string `json:"destinationDir"`
	KeepFolders    bool   `json:"keepFolders"`
	Recursive      bool   `json:"recursive"`
	Execution      string `json:"execution"`
}

type NcConvertUsers struct {
	Users []NextcloudUser `json:"nextcloudUsers"`
}

// Parses the given file into the in-memory struct
func ParseConvertUsers(filePath string) (*NcConvertUsers, error) {
	file, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE, 0644)
	if err != nil {
		return nil, fmt.Errorf("failed to open the file '%s': %s", filePath, err)
	}
	defer file.Close()

	byteValue, err := io.ReadAll(file)
	if err != nil {
		return nil, fmt.Errorf("failed to read the file '%s': %s", filePath, err)
	}

	var conv NcConvertUsers
	if err := json.Unmarshal(byteValue, &conv); err != nil {
		return nil, fmt.Errorf("failed to parse the file '%s': %s", filePath, err)
	}

	return &conv, nil
}
```
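A short usage sketch of `ParseConvertUsers` (note that the annotated example above uses `//` comments for documentation only; the real `ncConverter.json` must be plain JSON, because `encoding/json` rejects comments):

```go
package main

import (
	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

func main() {
	users, err := models.ParseConvertUsers("./ncConverter.json")
	if err != nil {
		logger.Fatal("%s", err)
	}
	for _, user := range users.Users {
		logger.Info("user %s: %d OnlyOffice jobs, %d BookStack jobs",
			user.Username, len(user.ConvertJobs), len(user.BookStack.Jobs))
	}
}
```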
@@ -0,0 +1,95 @@

```go
package models

import (
	"flag"
	"os"

	yaml "gopkg.in/yaml.v3"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

type WebConfig struct {
	Server  Server  `yaml:"server"`
	Logging Logging `yaml:"logging"`
}

type Server struct {
	Address     string `yaml:"address"`
	Certificate string `yaml:"certificate"`
	OneShot     bool   `yaml:"oneShot"`
}

type Logging struct {
	PrintLogLevel string `yaml:"printLogLevel"`
	WriteLogLevel string `yaml:"writeLogLevel"`
	LogFilePath   string `yaml:"logFilePath"`
}

// Parses the configuration file (.yaml) into a WebConfig
func ParseWebConfig(webConfig *WebConfig, file string) (*WebConfig, error) {
	if file == "" {
		return webConfig, nil
	}

	dat, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}

	if err := yaml.Unmarshal(dat, &webConfig); err != nil {
		return nil, err
	}

	return webConfig, nil
}

func getDefaultConfig() *WebConfig {
	return &WebConfig{
		Server: Server{
			Address: ":4000",
		},
		Logging: Logging{
			PrintLogLevel: "info",
			WriteLogLevel: "warning",
		},
	}
}

// Applies the CLI options and the configuration options from the config file
func SetConfig() (*WebConfig, error) {
	configPath := "./config.yaml"
	// The path of the configuration file is needed first to determine the "default" values
	for i, arg := range os.Args {
		if (arg == "-config" || arg == "--config") && len(os.Args) > i+1 {
			configPath = os.Args[i+1]
			break
		}
	}
	webConfig := getDefaultConfig()
	webConfig, err := ParseWebConfig(webConfig, configPath)
	if err != nil {
		logger.Error("Unable to parse the configuration file '%s': %s", configPath, err)
		webConfig = getDefaultConfig()
		err = nil
	}

	_ = flag.String("config", "./config.yaml", "Path to the configuration file (see configs/config.yaml for an example)")
	address := flag.String("address", webConfig.Server.Address, "Address and port on which the API and the web server should listen")
	printLogLevel := flag.String("printLogLevel", webConfig.Logging.PrintLogLevel, "Minimum log level to log (debug, info, warning, error, fatal)")
	oneShot := flag.Bool("oneShot", webConfig.Server.OneShot, "All jobs are executed immediately and the program exits afterwards")

	flag.Parse()
	webConfig.Server.Address = *address
	webConfig.Logging.PrintLogLevel = *printLogLevel
	webConfig.Server.OneShot = *oneShot

	defaultLogger := logger.Logger{
		PrintLevel:  logger.GetLevelByName(webConfig.Logging.PrintLogLevel),
		LogLevel:    logger.GetLevelByName(webConfig.Logging.WriteLogLevel),
		LogFilePath: webConfig.Logging.LogFilePath,
		PrintSource: true,
	}
	logger.SetGlobalLogger(&defaultLogger)

	return webConfig, err
}
```
@@ -0,0 +1,574 @@

```go
package ncworker

// @TODO delete folders for shelves that don't exist anymore

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"strings"
	"sync"
	"time"

	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/internal/nextcloud"
	"rpjosh.de/ncDocConverter/pkg/logger"
	"rpjosh.de/ncDocConverter/pkg/utils"
)

type BsJob struct {
	job    *models.BookStackJob
	ncUser *models.NextcloudUser

	cacheCount   int
	cacheBooks   map[int]book
	cacheShelves []shelf
	// If the cache should be used in the current execution
	useCache bool
}

type shelf struct {
	ID   int    `json:"id"`
	Name string `json:"name"`

	// This has to be fetched separately
	books []int
}
type shelfDetails struct {
	ID    int      `json:"id"`
	Name  string   `json:"name"`
	Tags  []string `json:"tags"`
	Books []struct {
		ID   int    `json:"id"`
		Name string `json:"name"`
	} `json:"books"`
}
type shelves struct {
	Data []shelf `json:"data"`
}

type book struct {
	ID   int    `json:"id"`
	Name string `json:"name"`

	// This has to be calculated from the most recently modified page
	lastModified time.Time
	// If the book should be skipped during conversion
	ignore bool

	// If the book has already been converted
	converted bool
}
type books struct {
	Data []book `json:"data"`
}
type bookDetails struct {
	ID       int    `json:"id"`
	Name     string `json:"name"`
	Contents []struct {
		ID        int       `json:"id"`
		Name      string    `json:"name"`
		Slug      string    `json:"slug"`
		BookID    int       `json:"book_id"`
		ChapterID int       `json:"chapter_id"`
		Draft     bool      `json:"draft"`
		Template  bool      `json:"template"`
		UpdatedAt time.Time `json:"updated_at"`
		URL       string    `json:"url"`
		Type      string    `json:"type"`
	} `json:"contents"`
	Tags []string `json:"tags"`
}

func NewBsJob(job *models.BookStackJob, ncUser *models.NextcloudUser) *BsJob {
	bsJob := BsJob{
		job:    job,
		ncUser: ncUser,
	}

	return &bsJob
}

func (job *BsJob) ExecuteJob() {
	// Get all existing files in the destination folder
	destination, err := nextcloud.SearchInDirectory(
		job.ncUser, job.job.DestinationDir,
		[]string{
			"text/html",
			"application/pdf",
		},
	)
	if err != nil {
		logger.Error("Failed to get files in destination directory '%s': %s", job.job.DestinationDir, err)
		return
	}

	// Make a map with the path as index
	prefix := "/remote.php/dav/files/" + job.ncUser.Username + "/"
	destinationMap := nextcloud.ParseSearchResult(destination, prefix, job.job.DestinationDir)

	// Check for cache
	job.cache()

	// Get all shelves
	shelves, err := job.getShelves()
	if err != nil {
		logger.Error("Failed to get shelves: %s", err)
		return
	}

	// Get all books
	books, err := job.getBooks()
	if err != nil {
		logger.Error("Failed to get books: %s", err)
		return
	}

	// Index books by path
	indexedBooks := job.getIndexedBooks(shelves, books)

	// Cache data
	if job.job.CacheCount > 0 && !job.useCache {
		job.cacheCount = job.job.CacheCount

		job.cacheShelves = *shelves
		job.cacheBooks = utils.CopyMap(*books)
	}

	// Now finally convert the books :)
	convertCount := 0
	var wg sync.WaitGroup
	for i, b := range indexedBooks {
		// mark as converted
		indexedBooks[i].converted = true
		(*books)[b.ID] = *indexedBooks[i]

		// check if it has to be converted again (updated) or for the first time
		des, exists := destinationMap[i]

		if (!exists || b.lastModified.After(des.LastModified)) && !b.ignore {
			wg.Add(1)
			convertCount++
			go func(book book, path string) {
				defer wg.Done()
				job.convertBook(book, path)
			}(*b, i)
		} else if b.ignore {
			logger.Debug("Duplicate book name: %s", b.Name)
		}

		// A set ignore flag states that a book with a duplicate name exists → delete the original file also
		if !b.ignore {
			delete(destinationMap, i)
		}
	}
	wg.Wait()

	// Convert the remaining books
	if job.job.IncludeBooksWithoutShelve {
		for _, b := range *books {
			// check if it has to be converted again (updated) or for the first time
			des, exists := destinationMap[b.Name]

			if !b.converted && !b.ignore && (!exists || b.lastModified.After(des.LastModified)) {
				wg.Add(1)
				convertCount++
				go func(book book, path string) {
					defer wg.Done()
					job.convertBook(book, path)
				}(b, b.Name)
			}
			delete(destinationMap, b.Name)
		}
		wg.Wait()
	}

	// Delete the files which are not available anymore
	for _, dest := range destinationMap {
		err := nextcloud.DeleteFile(job.ncUser, dest.Path)
		if err != nil {
			logger.Error(utils.FirstCharToUppercase(err.Error()))
		}
	}

	logger.Info("Finished BookStack job \"%s\": %d books converted", job.job.JobName, convertCount)
}

// Checks and initializes the cache
func (job *BsJob) cache() {
	if job.job.CacheCount > 0 {
		job.cacheCount--
		if job.cacheCount < 0 {
			job.useCache = false
		} else {
			job.useCache = true
		}
	}
}

// Returns the relative path of the book to save in Nextcloud
func (job *BsJob) getPath(bookName string, shelfName string) string {
	if job.job.KeepStructure {
		return shelfName + "/" + bookName
	} else {
		return bookName
	}
}

// Gets all shelves
func (job *BsJob) getShelves() (*[]shelf, error) {
	if job.useCache {
		return &job.cacheShelves, nil
	}

	client := http.Client{Timeout: 10 * time.Second}

	req := job.getRequest(http.MethodGet, "shelves", nil)

	// Add shelf filter
	q := req.URL.Query()
	for _, j := range job.job.Shelves {
		q.Add("filter[name:eq]", j)
	}
	req.URL.RawQuery = q.Encode()

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		return nil, fmt.Errorf("expected status code 200, got %d", res.StatusCode)
	}

	rtc := shelves{}
	if err = json.NewDecoder(res.Body).Decode(&rtc); err != nil {
		return nil, fmt.Errorf("failed to decode response: %s", err)
	}

	if job.job.ShelvesRegex != "" {
		reg, err := regexp.Compile(job.job.ShelvesRegex)
		if err != nil {
			// This is fatal
			logger.Fatal("Failed to parse the regex '%s': %s", job.job.ShelvesRegex, err)
		}

		rtc2 := shelves{}

		for i, shelve := range rtc.Data {
			if reg.Match([]byte(shelve.Name)) {
				rtc2.Data = append(rtc2.Data, rtc.Data[i])
			} else {
				logger.Debug("Ignoring shelf %s", shelve.Name)
			}
		}

		rtc = rtc2
	}

	return &rtc.Data, nil
}

// Returns the IDs of the books that belong to the shelf
func (job *BsJob) getBooksInShelve(id int) ([]int, error) {
	client := http.Client{Timeout: 10 * time.Second}
	req := job.getRequest(http.MethodGet, "shelves/"+fmt.Sprintf("%d", id), nil)

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		return nil, fmt.Errorf("expected status code 200, got %d", res.StatusCode)
	}

	shelfDetails := shelfDetails{}
	if err = json.NewDecoder(res.Body).Decode(&shelfDetails); err != nil {
		return nil, fmt.Errorf("failed to decode response: %s", err)
	}

	rtc := make([]int, len(shelfDetails.Books))
	for i, details := range shelfDetails.Books {
		rtc[i] = details.ID
	}

	return rtc, nil
}

// Indexes the books by their relative path
func (job *BsJob) getIndexedBooks(shelves *[]shelf, books *map[int]book) map[string]*book {
	// Now it has to be checked which book belongs to which shelf.
	// With cached data this was already done
	if !job.useCache {
		var wg sync.WaitGroup
		for i, shelv := range *shelves {
			wg.Add(1)

			go func(shelf shelf, index int) {
				defer wg.Done()

				ids, err := job.getBooksInShelve(shelf.ID)
				if err != nil {
					logger.Error("Failed to get shelf details: %s", err)
				} else {
					b := make([]int, 0)

					for _, id := range ids {
						// Check if the book should be excluded → it is not contained in the book map
						book, exists := (*books)[id]
						if exists {
							b = append(b, book.ID)
						}
					}

					(*shelves)[index].books = b
				}
			}(shelv, i)
		}
		wg.Wait()
	}

	// Books can have the same name. This would lead to conflicts
	// if they are in the same shelf / folder.
	// In such a case the ID of the book will be appended to the name: "bookName_123".
	// Because of that a map indexed by the path is built first and AFTERWARDS the files are converted
	indexedBooks := make(map[string]*book)
	for _, shelf := range *shelves {
		for _, bookId := range shelf.books {
			b := (*books)[bookId]
			bookPath := job.getPath(b.Name, shelf.Name)
			existingBook, doesExists := indexedBooks[bookPath]

			if doesExists {
				// The book ID will be appended to the path
				newBookPath := fmt.Sprintf("%s_%d", bookPath, b.ID)
				indexedBooks[newBookPath] = &b

				// Also add the other book with its ID
				otherNewBookPath := fmt.Sprintf("%s_%d", bookPath, existingBook.ID)
				indexedBooks[otherNewBookPath] = existingBook

				// The original book won't be removed because otherwise a third book with the same
				// name would be inserted using its real name.
				// But because this is a pointer, a copy is needed
				var existingBookCopy book
				utils.Copy(existingBook, &existingBookCopy)
				existingBookCopy.ignore = true
				indexedBooks[bookPath] = &existingBookCopy
			} else {
				indexedBooks[bookPath] = &b
			}
		}

		// If the structure should be kept, a folder for every shelf has to be created
		if job.job.KeepStructure && !job.useCache {
			nextcloud.CreateFoldersRecursively(job.ncUser, job.job.DestinationDir+shelf.Name+"/")
		}
	}

	return indexedBooks
}

// Gets all books and returns a map indexed by the ID of the book
func (job *BsJob) getBooks() (*map[int]book, error) {
	if job.useCache {
		books := utils.CopyMap(job.cacheBooks)

		// The last change date has to be updated even with cached data
		var wg sync.WaitGroup
		var mut = &sync.Mutex{}
		for i, b := range books {
			wg.Add(1)

			go func(book book, index int) {
				defer wg.Done()
				lastModified, err := job.getLastModifiedOfBook(book.ID)
				if err != nil {
					logger.Warning("Failed to get last modified date of book %s (%d) - using old date: %s", book.Name, book.ID, err)
					return
				}

				book.lastModified = *lastModified

				mut.Lock()
				books[index] = book
				mut.Unlock()
			}(b, i)
		}
		wg.Wait()

		return &books, nil
	}

	client := http.Client{Timeout: 10 * time.Second}
	req := job.getRequest(http.MethodGet, "books", nil)

	// Add book filter
	q := req.URL.Query()
	for _, j := range job.job.Books {
		q.Add("filter[name:eq]", j)
	}
	req.URL.RawQuery = q.Encode()

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		return nil, fmt.Errorf("expected status code 200, got %d", res.StatusCode)
	}

	booksArray := books{}
	if err = json.NewDecoder(res.Body).Decode(&booksArray); err != nil {
		return nil, fmt.Errorf("failed to decode response: %s", err)
	}

	if job.job.BooksRegex != "" {
		reg, err := regexp.Compile(job.job.BooksRegex)
		if err != nil {
			// This is fatal
			logger.Fatal("Failed to parse the regex '%s': %s", job.job.BooksRegex, err)
		}

		booksArray2 := books{}

		for i, book := range booksArray.Data {
			if reg.Match([]byte(book.Name)) {
				booksArray2.Data = append(booksArray2.Data, booksArray.Data[i])
			} else {
				logger.Debug("Ignoring book %s", book.Name)
			}
		}

		booksArray = booksArray2
	}

	// Create the indexed map
	rtc := make(map[int]book)
	var wg sync.WaitGroup
	var mut = &sync.Mutex{}
	for _, b := range booksArray.Data {
		wg.Add(1)

		go func(b book) {
			defer wg.Done()
			lastModified, err := job.getLastModifiedOfBook(b.ID)
			if err != nil {
				logger.Warning("Failed to get last modified date of book %s (%d) - skipping: %s", b.Name, b.ID, err)
				return
			}

			if lastModified.Unix() == 0 {
				logger.Info("Skipping book %s (%d) because of no content", b.Name, b.ID)
				return
			}

			mut.Lock()
			rtc[b.ID] = book{
				ID:           b.ID,
				Name:         b.Name,
				lastModified: *lastModified,
			}
			mut.Unlock()
		}(b)
	}
	wg.Wait()

	return &rtc, nil
}

// Returns the last modified time of a book
func (job *BsJob) getLastModifiedOfBook(id int) (*time.Time, error) {
	client := http.Client{Timeout: 10 * time.Second}
	req := job.getRequest(http.MethodGet, "books/"+fmt.Sprintf("%d", id), nil)

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		return nil, fmt.Errorf("expected status code 200, got %d", res.StatusCode)
	}

	bd := bookDetails{}
	if err = json.NewDecoder(res.Body).Decode(&bd); err != nil {
		return nil, fmt.Errorf("failed to decode response: %s", err)
	}

	lastMod := time.Unix(0, 0)
	for i, content := range bd.Contents {
		if content.Template || content.Draft {
			continue
		}

		if content.UpdatedAt.After(lastMod) {
			lastMod = bd.Contents[i].UpdatedAt
		}
	}

	return &lastMod, nil
}

// Returns a new request to the BookStack API.
// The path beginning AFTER /api/ should be given (e.g.: shelves)
func (job *BsJob) getRequest(method string, path string, body io.Reader) *http.Request {
	req, err := http.NewRequest(method, job.ncUser.BookStack.URL+"/api/"+path, body)
	if err != nil {
		logger.Error("%s", err)
	}
	req.Header.Set("Authorization", "Token "+job.ncUser.BookStack.Token)

	return req
}

// Converts the given book and uploads it to Nextcloud.
// The path is expected relative to the job's root directory and does
// not contain a file extension
func (job *BsJob) convertBook(book book, path string) {
	fileExtension, url := job.getFileExtension()

	client := http.Client{Timeout: 10 * time.Second}
	req := job.getRequest(http.MethodGet, fmt.Sprintf("books/%d/export/%s", book.ID, url), nil)

	res, err := client.Do(req)
	if err != nil {
		logger.Error("Failed to convert book: %s", err)
		return
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		logger.Error("Failed to convert book: expected status code 200, got %d", res.StatusCode)
		return
	}

	err = nextcloud.UploadFile(job.ncUser, job.job.DestinationDir+path+fileExtension, res.Body)
	if err != nil {
		logger.Error("Failed to upload book to Nextcloud: %s", err)
	}
}

func (job *BsJob) getFileExtension() (fileExtension string, url string) {
	switch strings.ToLower(string(job.job.Format)) {
	case "html":
		fileExtension = ".html"
		url = "html"
	case "pdf":
		fileExtension = ".pdf"
		url = "pdf"
	default:
		logger.Fatal("Invalid format given: '%s'. Expected 'html' or 'pdf'", job.job.Format)
	}

	return
}
```
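The duplicate handling in `getIndexedBooks` is the subtlest part of this file. A self-contained toy version of the same idea — when two books map to the same path, both are re-indexed with an `_<ID>` suffix and the plain path is kept only as an ignore marker (names and IDs below are made up):

```go
package main

import "fmt"

type book struct {
	ID     int
	Name   string
	ignore bool
}

func main() {
	books := []book{{1, "Linux", false}, {2, "Work", false}, {3, "Linux", false}}

	indexed := make(map[string]*book)
	for i := range books {
		b := &books[i]
		if existing, ok := indexed[b.Name]; ok {
			// Conflict: store both books under "<name>_<id>" ...
			indexed[fmt.Sprintf("%s_%d", b.Name, b.ID)] = b
			indexed[fmt.Sprintf("%s_%d", b.Name, existing.ID)] = existing
			// ... and keep the plain name only as an ignore marker, so a
			// third duplicate would be detected as well
			marker := *existing
			marker.ignore = true
			indexed[b.Name] = &marker
		} else {
			indexed[b.Name] = b
		}
	}

	for path, b := range indexed {
		fmt.Printf("%s -> id=%d ignore=%v\n", path, b.ID, b.ignore)
	}
}
```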
@@ -0,0 +1,114 @@

```go
package ncworker

import (
	"bufio"
	"fmt"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/go-co-op/gocron"
	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/pkg/logger"
)

type NcConvertScheduler struct {
	users  *models.NcConvertUsers
	config *models.WebConfig

	scheduler *gocron.Scheduler
}

func NewScheduler(users *models.NcConvertUsers, config *models.WebConfig) *NcConvertScheduler {
	scheduler := NcConvertScheduler{
		users:     users,
		config:    config,
		scheduler: gocron.NewScheduler(time.Local),
	}
	// Don't reschedule a task if it's still running
	scheduler.scheduler.SingletonMode()
	scheduler.scheduler.StartAsync()

	if config.Server.OneShot {
		scheduler.ScheduleExecutionsOneShot()
	} else {
		scheduler.ScheduleExecutions()

		fmt.Println("Started in schedule mode.\nType \"exit\" to leave or \"execute\" to execute all jobs")
		// Endless loop
		for {
			reader := bufio.NewReader(os.Stdin)
			text, err := reader.ReadString('\n')
			if err != nil {
				// No console input available -> block forever
				var wg sync.WaitGroup
				logger.Debug("No console available")
				wg.Add(1)
				wg.Wait()
			}

			input := strings.Trim(strings.ToLower(text), "\n")
			if input == "exit" {
				break
			} else if input == "execute" {
				scheduler.scheduler.RunAll()
			}
		}
	}

	return &scheduler
}

// Executes all jobs and exits the program afterwards
func (scheduler NcConvertScheduler) ScheduleExecutionsOneShot() {
	for _, user := range scheduler.users.Users {

		// Execute Nextcloud jobs
		for _, job := range user.ConvertJobs {
			convJob := NewNcJob(&job, &user)
			convJob.ExecuteJob()
		}

		// Execute BookStack jobs
		if user.BookStack.URL != "" {
			for _, job := range user.BookStack.Jobs {
				bsJob := NewBsJob(&job, &user)
				bsJob.ExecuteJob()
			}
		}

	}
}

// Schedules all jobs with gocron
func (s NcConvertScheduler) ScheduleExecutions() {
	for ui, user := range s.users.Users {

		// Schedule Nextcloud jobs
		for i, job := range user.ConvertJobs {
			convJob := NewNcJob(&s.users.Users[ui].ConvertJobs[i], &s.users.Users[ui])

			_, err := s.scheduler.Cron(job.Execution).DoWithJobDetails(s.executeJob, convJob)
			if err != nil {
				logger.Fatal("Failed to schedule office job '%s': %s", job.JobName, err)
			}
		}

		// Schedule BookStack jobs
		if user.BookStack.URL != "" {
			for i, job := range user.BookStack.Jobs {
				bsJob := NewBsJob(&s.users.Users[ui].BookStack.Jobs[i], &s.users.Users[ui])

				_, err := s.scheduler.Cron(job.Execution).DoWithJobDetails(s.executeJob, bsJob)
				if err != nil {
					logger.Fatal("Failed to schedule BookStack job '%s': %s", job.JobName, err)
				}
			}
		}
	}
}

func (s NcConvertScheduler) executeJob(job Job, scheduledJob gocron.Job) {
	job.ExecuteJob()
}
```
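A minimal standalone sketch of the gocron pattern used here (`Cron` plus `DoWithJobDetails`, which appends the `gocron.Job` argument after the own parameters, just like `executeJob` receives it); the cron expression is the same made-up one as in the sample config:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.Local)
	// Don't start a run while the previous one is still going
	s.SingletonMode()

	// The trailing gocron.Job parameter is filled in by DoWithJobDetails
	work := func(name string, job gocron.Job) {
		fmt.Printf("%s: running %s\n", time.Now().Format("15:04:05"), name)
	}

	// "45 23 * * 6" = every Saturday at 23:45
	if _, err := s.Cron("45 23 * * 6").DoWithJobDetails(work, "demo job"); err != nil {
		panic(err)
	}

	s.StartBlocking()
}
```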
@@ -0,0 +1,5 @@

```go
package ncworker

type Job interface {
	ExecuteJob()
}
```
@@ -0,0 +1,206 @@

```go
package ncworker

import (
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/internal/nextcloud"
	"rpjosh.de/ncDocConverter/pkg/logger"
	"rpjosh.de/ncDocConverter/pkg/utils"
)

type convertJob struct {
	job    *models.NcConvertJob
	ncUser *models.NextcloudUser
}

type convertQueu struct {
	source      nextcloud.NcFile
	destination string
}

func NewNcJob(job *models.NcConvertJob, ncUser *models.NextcloudUser) *convertJob {
	convJob := &convertJob{
		job:    job,
		ncUser: ncUser,
	}

	return convJob
}

func (job *convertJob) ExecuteJob() {
	// Get the existing directory contents
	sourceFolder, err := nextcloud.SearchInDirectory(
		job.ncUser,
		job.job.SourceDir,
		[]string{
			"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
			"application/msword",
		},
	)
	if err != nil {
		logger.Error("Failed to get files in source directory '%s': %s", job.job.SourceDir, err)
		return
	}

	destinationFolder, err := nextcloud.SearchInDirectory(
		job.ncUser,
		job.job.DestinationDir,
		[]string{
			"application/pdf",
		},
	)
	if err != nil {
		logger.Error("Failed to get files in destination directory '%s': %s", job.job.DestinationDir, err)
		return
	}

	// Store all files in a map
	prefix := "/remote.php/dav/files/" + job.ncUser.Username + "/"
	sourceMap := nextcloud.ParseSearchResult(sourceFolder, prefix, job.job.SourceDir)
	destinationMap := nextcloud.ParseSearchResult(destinationFolder, prefix, job.job.DestinationDir)

	// Check which files have to be converted
	var filesToConvert []convertQueu
	var directorys []string

	for index, source := range sourceMap {
		// Check if the file exists in the destination map
		if dest, exists := destinationMap[index]; exists {
			// Compare timestamp and size
			if dest.LastModified.Before(source.LastModified) {
				filesToConvert = append(filesToConvert, convertQueu{source: source, destination: dest.Path})
			}
			delete(destinationMap, index)
		} else {
			// The directory might not exist yet -> check for existence
			destinationDir := job.getDestinationDir(source.Path)
			appendIfNotExists(&directorys, destinationDir[0:strings.LastIndex(destinationDir, "/")+1])

			filesToConvert = append(filesToConvert, convertQueu{source: source, destination: destinationDir})

			delete(destinationMap, index)
		}
	}

	var wg sync.WaitGroup

	// Delete the files which are not available anymore
	wg.Add(len(destinationMap))
	for _, dest := range destinationMap {
		go func(file nextcloud.NcFile) {
			err := nextcloud.DeleteFile(job.ncUser, file.Path)
			if err != nil {
				logger.Error(utils.FirstCharToUppercase(err.Error()))
			}
			wg.Done()
		}(dest)
	}
	wg.Wait()

	// Create the required directories
	wg.Add(len(directorys))
	for _, dest := range directorys {
		go func(path string) {
			nextcloud.CreateFoldersRecursively(job.ncUser, path)
			wg.Done()
		}(dest)
	}
	wg.Wait()

	// Convert the files
	wg.Add(len(filesToConvert))
	for _, file := range filesToConvert {
		logger.Info("Path: %s", file.source.Path)
		go func(cvt convertQueu) {
			job.convertFile(cvt.source.Path, cvt.source.Fileid, cvt.destination)
			wg.Done()
		}(file)
	}
	wg.Wait()

	logger.Info("Finished Nextcloud job \"%s\": %d documents converted", job.job.JobName, len(filesToConvert))
}

// Appends the directory to the slice if it isn't already covered
// by another element
func appendIfNotExists(dirs *[]string, directory string) {
	directoryLength := len(directory)
	for i, currentDir := range *dirs {
		currentLength := len(currentDir)

		// The new directory extends an existing entry -> replace it
		if directoryLength > currentLength && directory[0:currentLength] == currentDir {
			(*dirs)[i] = directory
			return
		} else if directoryLength <= currentLength && currentDir[0:directoryLength] == directory {
			// The new directory is already covered by an existing entry
			return
		}
	}
	*dirs = append(*dirs, directory)
}

func (job *convertJob) getDestinationDir(sourceFile string) string {
	sourceFile = sourceFile[len(job.job.SourceDir):]
	var extension = filepath.Ext(sourceFile)
	var name = sourceFile[0 : len(sourceFile)-len(extension)]

	return job.job.DestinationDir + name + ".pdf"
}

// Converts the source file to the destination file utilizing the OnlyOffice convert API
func (job *convertJob) convertFile(sourceFile string, sourceid int, destinationFile string) {
	logger.Debug("Converting %s (%d) to %s", sourceFile, sourceid, destinationFile)

	client := http.Client{Timeout: 10 * time.Second}
	req, err := http.NewRequest(http.MethodGet, job.ncUser.NextcloudBaseUrl+"/apps/onlyoffice/downloadas", nil)
	if err != nil {
		logger.Error("%s", err)
		return
	}
	req.SetBasicAuth(job.ncUser.Username, job.ncUser.Password)

	q := req.URL.Query()
	q.Add("fileId", fmt.Sprint(sourceid))
	q.Add("toExtension", "pdf")
	req.URL.RawQuery = q.Encode()

	res, err := client.Do(req)
	if err != nil {
		logger.Error("Failed to access the convert api: %s", err)
		return
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		body, _ := io.ReadAll(res.Body)
		logger.Error("Failed to access the convert api (#%d). Do you have OnlyOffice installed?: %s", res.StatusCode, body)
		return
	}

	uploadClient := http.Client{Timeout: 10 * time.Second}
	uploadReq, err := http.NewRequest(http.MethodPut, job.ncUser.NextcloudBaseUrl+"/remote.php/dav/files/"+job.ncUser.Username+"/"+destinationFile, res.Body)
	if err != nil {
		logger.Error("%s", err)
		return
	}
	uploadReq.SetBasicAuth(job.ncUser.Username, job.ncUser.Password)
	uploadReq.Header.Set("Content-Type", "application/binary")

	res, err = uploadClient.Do(uploadReq)
	if err != nil {
		logger.Error("%s", err)
		return
	}
	defer res.Body.Close()

	// Expect 201 (created) or 204 (overwritten)
	if res.StatusCode != 204 && res.StatusCode != 201 {
		logger.Error("Failed to create file %s (#%d)", destinationFile, res.StatusCode)
	}
}
```
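The prefix logic in `appendIfNotExists` is easy to get wrong, so here is a small standalone check of the intended behavior with made-up paths:

```go
package main

import "fmt"

// Same idea as appendIfNotExists above: only keep the deepest directory
// of every tree, so each one is created exactly once
func appendIfNotExists(dirs *[]string, directory string) {
	for i, currentDir := range *dirs {
		if len(directory) > len(currentDir) && directory[:len(currentDir)] == currentDir {
			// The new directory extends an existing entry -> replace it
			(*dirs)[i] = directory
			return
		} else if len(directory) <= len(currentDir) && currentDir[:len(directory)] == directory {
			// The new directory is already covered by an existing entry
			return
		}
	}
	*dirs = append(*dirs, directory)
}

func main() {
	var dirs []string
	appendIfNotExists(&dirs, "ebooks/")
	appendIfNotExists(&dirs, "ebooks/wiki/") // extends "ebooks/" -> replaces it
	appendIfNotExists(&dirs, "ebooks/")      // already covered -> skipped
	appendIfNotExists(&dirs, "documents/")   // new tree -> appended
	fmt.Println(dirs)                        // [ebooks/wiki/ documents/]
}
```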
@@ -0,0 +1,238 @@

```go
package nextcloud

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"text/template"
	"time"

	"rpjosh.de/ncDocConverter/internal/models"
	"rpjosh.de/ncDocConverter/pkg/logger"
	"rpjosh.de/ncDocConverter/web"
)

// The internal representation of a Nextcloud file
type NcFile struct {
	// File extension: txt
	Extension string
	// Relative path of the file to the Nextcloud root: /folder/file.txt
	Path         string
	LastModified time.Time
	ContentType  string
	// Size in bytes
	Size int
	// The unique file ID of the Nextcloud server
	Fileid int
	// The WebDAV URL for file reference
	WebdavURL string
}

type searchTemplateData struct {
	Username    string
	Directory   string
	ContentType []string
}

type searchResult struct {
	XMLName  xml.Name               `xml:"multistatus"`
	Text     string                 `xml:",chardata"`
	D        string                 `xml:"d,attr"`
	S        string                 `xml:"s,attr"`
	Oc       string                 `xml:"oc,attr"`
	Nc       string                 `xml:"nc,attr"`
	Response []searchResultResponse `xml:"response"`
}
type searchResultResponse struct {
	Text     string `xml:",chardata"`
	Href     string `xml:"href"`
	Propstat struct {
		Text string `xml:",chardata"`
		Prop struct {
			Text            string `xml:",chardata"`
			Getcontenttype  string `xml:"getcontenttype"`
			Getlastmodified string `xml:"getlastmodified"`
			Size            string `xml:"size"`
			Fileid          int    `xml:"fileid"`
		} `xml:"prop"`
		Status string `xml:"status"`
	} `xml:"propstat"`
}

func (r *searchResultResponse) GetLastModified() time.Time {
	// Time format: Fri, 23 Sep 2022 05:46:31 GMT
	rtc, err := time.Parse("Mon, 02 Jan 2006 15:04:05 GMT", r.Propstat.Prop.Getlastmodified)
	if err != nil {
		logger.Warning("%s", err)
		rtc = time.Unix(0, 1)
	}

	return rtc
}

// Returns a new request to the Nextcloud API.
// The path beginning AFTER /dav/ should be given (e.g.: myUser/folder/file.txt)
func getRequest(method string, path string, body io.Reader, ncUser *models.NextcloudUser) *http.Request {
	req, err := http.NewRequest(method, ncUser.NextcloudBaseUrl+"/remote.php/dav/"+path, body)
	if err != nil {
		logger.Error("%s", err)
	}
	req.SetBasicAuth(ncUser.Username, ncUser.Password)

	return req
}

// Searches for all files of the given content types starting in the given directory.
func SearchInDirectory(ncUser *models.NextcloudUser, directory string, contentType []string) (*searchResult, error) {
	client := http.Client{Timeout: 5 * time.Second}

	template, err := template.ParseFS(web.ApiTemplateFiles, "apitemplate/ncsearch.tmpl.xml")
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	templateData := searchTemplateData{
		Username:    ncUser.Username,
		Directory:   directory,
		ContentType: contentType,
	}
	if err = template.Execute(&buf, templateData); err != nil {
		return nil, err
	}

	// The expected status code is 207
	req := getRequest("SEARCH", "", &buf, ncUser)
	req.Header.Set("Content-Type", "application/xml")

	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}

	// Read the body before checking the status code so it can be printed in the error message
	defer res.Body.Close()

	resBody, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}

	// Create the folder if it does not exist
	if res.StatusCode == 404 {
		logger.Info("Creating directory '%s' because it does not exist", "/"+directory)
		CreateFoldersRecursively(ncUser, "/"+directory+"notExisting.pdf")
		return &searchResult{}, nil
	}

	if res.StatusCode != 207 {
		return nil, fmt.Errorf("status code %d: %s", res.StatusCode, resBody)
	}

	var result searchResult
	if err = xml.Unmarshal(resBody, &result); err != nil {
		return nil, err
	}

	return &result, nil
}

// Parses the responses of the given search result into NcFiles.
// A map indexed by the relative path based on the source directory ("someFolder/file.txt")
// with the matching NcFile is returned; therefore the source directory has to be given as well.
//
// To strip the prefix "/remote.php/dav/user/" from the paths, the prefix has to be given too.
func ParseSearchResult(result *searchResult, prefix string, sourceDir string) map[string]NcFile {
	preCount := len(prefix)
	rtc := make(map[string]NcFile)

	for _, file := range result.Response {
		href, _ := url.QueryUnescape(file.Href)
		path := href[preCount:]
		var extension = filepath.Ext(path)
		var name = path[0 : len(path)-len(extension)][len(sourceDir):]
		time := file.GetLastModified()
		size, err := strconv.Atoi(file.Propstat.Prop.Size)
		if err != nil {
			logger.Error("Failed to parse the file size '%s' to an integer: %s", file.Propstat.Prop.Size, err)
			continue
		}
		rtc[name] = NcFile{
			Extension:    extension,
			Path:         path,
			LastModified: time,
			Size:         size,
			ContentType:  file.Propstat.Prop.Getcontenttype,
			Fileid:       file.Propstat.Prop.Fileid,
			WebdavURL:    file.Href,
		}
	}

	return rtc
}

// Deletes the file with the given path.
// The path has to start at the root level: Ebook/myFolder/file.txt
func DeleteFile(ncUser *models.NextcloudUser, filePath string) error {
	client := http.Client{Timeout: 5 * time.Second}

	req := getRequest(http.MethodDelete, "files/"+ncUser.Username+"/"+filePath, nil, ncUser)

	res, err := client.Do(req)
	if err != nil {
		return err
	}

	if res.StatusCode != 204 {
		return fmt.Errorf("failed to delete file %s (%d)", filePath, res.StatusCode)
	}

	return nil
}

// Creates all directories that are required to create the destination file.
// The path should be relative to the root: ebook/folder1/folder2/file.txt
func CreateFoldersRecursively(ncUser *models.NextcloudUser, destinationFile string) {
	s := strings.Split(destinationFile, "/")
	folderTree := ""

	// WebDAV doesn't have a function to create directories recursively → iterate
	for _, folder := range s[:len(s)-1] {
		folderTree += folder + "/"

		client := http.Client{Timeout: 5 * time.Second}
		req := getRequest("MKCOL", "files/"+ncUser.Username+"/"+folderTree, nil, ncUser)

		res, err := client.Do(req)
		if err != nil {
			logger.Error("%s", err)
			continue
		}

		// 405 means the folder already exists
		if res.StatusCode != 201 && res.StatusCode != 405 {
			logger.Error("Failed to create directory '%s'", folderTree)
		}
	}
}

// Uploads a file to the Nextcloud server.
// It will be saved to the destination given as a path relative to the Nextcloud root (ebook/file.txt).
func UploadFile(ncUser *models.NextcloudUser, destination string, content io.ReadCloser) error {
	client := http.Client{Timeout: 5 * time.Second}
	req := getRequest(http.MethodPut, "files/"+ncUser.Username+"/"+destination, content, ncUser)

	res, err := client.Do(req)
	if err != nil {
		return err
	}

	if res.StatusCode != 201 && res.StatusCode != 204 {
		return fmt.Errorf("expected status code 201 or 204 but got %d", res.StatusCode)
	}

	return nil
}
```
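The layout string used in `GetLastModified` is identical to Go's `http.TimeFormat` constant, so the parsing can be cross-checked like this:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// The example timestamp from the comment in GetLastModified
	const lastModified = "Fri, 23 Sep 2022 05:46:31 GMT"

	// http.TimeFormat is "Mon, 02 Jan 2006 15:04:05 GMT" - the same layout
	// the WebDAV code spells out by hand
	t, err := time.Parse(http.TimeFormat, lastModified)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2022-09-23 05:46:31 +0000 UTC
}
```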
main.go (22 changed lines)
@ -1,22 +0,0 @@
package main

import (
	"rpjosh.de/ncDocConverter/logger"
)

func init() {
	defaultLogger := logger.Logger{
		PrintLevel:  0,
		LogLevel:    1,
		LogFilePath: "log.log",
		PrintSource: true,
	}

	logger.SetGlobalLogger(&defaultLogger)
}

func main() {
	defer logger.CloseFile()
}

@ -3,15 +3,16 @@ package logger

import (
	"fmt"
	"log"
+	"os"
	"runtime"
	"strconv"
	"strings"
	"time"
-	"os"
)

-// define available log levels
+// Level of the log message
type Level uint8

const (
	LevelDebug Level = iota
	LevelInfo
@ -35,7 +36,6 @@ type Logger struct {

var dLogger Logger

func init() {

	dLogger = Logger{
		PrintLevel: LevelDebug,
		LogLevel:   LevelInfo,
@ -52,45 +52,50 @@ func (l Logger) Log(level Level, message string, parameters ...any) {
}

func (l Logger) log(level Level, message string, parameters ...any) {
-	pc, file, line, ok := runtime.Caller(2)
-	if (!ok) {
+	pc, file, line, ok := runtime.Caller(3)
+	if !ok {
		file = "#unknown"
		line = 0
	}

	// get the name of the level
	var levelName string
-	switch (level) {
-	case LevelDebug: levelName = "DEBUG"
-	case LevelInfo: levelName = "INFO "
-	case LevelWarning: levelName = "WARN "
-	case LevelError: levelName = "ERROR"
-	case LevelFatal: levelName = "FATAL"
+	switch level {
+	case LevelDebug:
+		levelName = "DEBUG"
+	case LevelInfo:
+		levelName = "INFO "
+	case LevelWarning:
+		levelName = "WARN "
+	case LevelError:
+		levelName = "ERROR"
+	case LevelFatal:
+		levelName = "FATAL"
	}

-	if (levelName == "") {
+	if levelName == "" {
		message = fmt.Sprintf("Invalid level value given: %d. Original message: ", level) + message
		levelName = "WARN "
		level = LevelWarning
	}

-	printMessage := "[" + levelName + "] " + time.Now().UTC().Format("2006-01-02 03:04:05") +
+	printMessage := "[" + levelName + "] " + time.Now().Local().Format("2006-01-02 15:04:05") +
		getSourceMessage(file, line, pc, l) +
		fmt.Sprintf(message, parameters...)

-	if (l.LogLevel <= level && l.fileLogger != nil) {
+	if l.LogLevel <= level && l.fileLogger != nil {
		l.fileLogger.Println(printMessage)
		l.logFile.Sync()

-		if (level == LevelFatal) {
+		if level == LevelFatal {
			l.CloseFile()
		}
	}

-	if (l.PrintLevel <= level) {
-		if (level == LevelError) {
+	if l.PrintLevel <= level {
+		if level == LevelError {
			l.consoleLoggerErr.Println(printMessage)
-		} else if (level == LevelFatal) {
+		} else if level == LevelFatal {
			l.consoleLoggerErr.Fatal(printMessage)
		} else {
			l.consoleLogger.Println(printMessage)
@ -99,8 +104,8 @@ func (l Logger) log(level Level, message string, parameters ...any) {

}

-func getSourceMessage(file string, line int, pc uintptr, l Logger) (string) {
-	if (!l.PrintSource) {
+func getSourceMessage(file string, line int, pc uintptr, l Logger) string {
+	if !l.PrintSource {
		return " - "
	}

@ -110,10 +115,11 @@ func getSourceMessage(file string, line int, pc uintptr, l Logger) (string) {
}

func (l *Logger) setup() {
	// log.Ldate|log.Ltime|log.Lshortfile
	l.consoleLogger = log.New(os.Stdout, "", 0)
	l.consoleLoggerErr = log.New(os.Stderr, "", 0)

-	if (l.LogFilePath != "") {
+	if strings.TrimSpace(l.LogFilePath) != "" {
		file, err := os.OpenFile(l.LogFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err == nil {
			l.fileLogger = log.New(file, "", 0)

@ -123,7 +129,7 @@ func (l *Logger) setup() {
	}
	} else {
		l.fileLogger = nil
-		if (l.logFile != nil) {
+		if l.logFile != nil {
			l.logFile.Close()
			l.logFile = nil
		}
@ -131,18 +137,20 @@ func (l *Logger) setup() {
}

func (l *Logger) CloseFile() {
-	if (dLogger.logFile != nil) {
-		dLogger.logFile.Close()
-		dLogger.logFile = nil
-		dLogger.fileLogger = nil
+	if l.logFile != nil {
+		l.logFile.Close()
+		l.logFile = nil
+		l.fileLogger = nil
	}
}


func SetGlobalLogger(l *Logger) {
	dLogger = *l
	dLogger.setup()
}
func GetGlobalLogger() *Logger {
	return &dLogger
}

func Debug(message string, parameters ...any) {
	dLogger.Log(LevelDebug, message, parameters...)

@ -163,3 +171,28 @@ func Fatal(message string, parameters ...any) {
func CloseFile() {
	dLogger.CloseFile()
}

// Tries to convert the given level name to the corresponding level code.
// Allowed values are: 'debug', 'info', 'warn', 'warning', 'error', 'panic' and 'fatal'.
// If an incorrect level name is given, a warning is logged and LevelInfo is returned.
func GetLevelByName(levelName string) Level {
	levelName = strings.ToLower(levelName)
	switch levelName {
	case "debug":
		return LevelDebug
	case "info":
		return LevelInfo
	case "warn", "warning":
		return LevelWarning
	case "error":
		return LevelError
	case "panic", "fatal":
		return LevelFatal
	default:
		Warning("Unable to parse the level name '%s'. Expected 'debug', 'info', 'warn', 'error' or 'fatal'", levelName)
		return LevelInfo
	}
}
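
A short usage sketch for the new GetLevelByName helper; reading the level name from a configuration value is an assumed scenario, and `configureLogLevel` is a hypothetical wrapper:

	// Hypothetical wiring in the application setup
	func configureLogLevel(levelName string) {
		l := logger.GetGlobalLogger()
		l.PrintLevel = logger.GetLevelByName(levelName) // "warn" yields LevelWarning
		logger.SetGlobalLogger(l)
	}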

@ -0,0 +1,38 @@
package utils

import (
	"reflect"
	"unicode"
)

// Makes the first character of the given string uppercase
func FirstCharToUppercase(text string) string {
	a := []rune(text)
	if len(a) == 0 {
		return text
	}
	a[0] = unicode.ToUpper(a[0])
	return string(a)
}

// Copies a map. The struct values are cloned as well
func CopyMap[T comparable, Val any](m map[T]Val) map[T]Val {
	cp := make(map[T]Val)
	for k, v := range m {
		var u Val
		Copy(&v, &u)
		cp[k] = u
	}

	return cp
}

// Copies a struct
func Copy(source interface{}, destin interface{}) {
	x := reflect.ValueOf(source)
	if reflect.ValueOf(destin).Kind() != reflect.Ptr {
		return
	}
	if x.Kind() == reflect.Ptr {
		reflect.ValueOf(destin).Elem().Set(x.Elem())
	} else {
		reflect.ValueOf(destin).Elem().Set(x)
	}
}
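
A quick sketch of what CopyMap guarantees; the `Book` struct is a made-up example type, and the snippet assumes it lives in the utils package with "fmt" imported:

	// Hypothetical example type
	type Book struct{ Title string }

	func ExampleCopyMap() {
		original := map[string]Book{"a": {Title: "First"}}
		clone := CopyMap(original)
		clone["a"] = Book{Title: "Changed"}
		fmt.Println(original["a"].Title) // still "First": the map and its struct values were copied
	}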

@ -0,0 +1,15 @@
@ECHO OFF

:: Bypass the "Terminate Batch Job" prompt
if "%~1"=="-FIXED_CTRL_C" (
	:: Remove the -FIXED_CTRL_C parameter
	SHIFT
) ELSE (
	:: Run the batch with <NUL and -FIXED_CTRL_C
	CALL <NUL %0 -FIXED_CTRL_C %*
	GOTO :EOF
)

SET PATH=%PATH%;C:\Windows\System32
set GOTMPDIR=C:\MYCOMP
nodemon --delay 1s -e go,html --ignore web/app/ --signal SIGKILL --exec go run ./cmd/ncDocConverth || exit 1

@ -0,0 +1,3 @@
#!/bin/sh

nodemon --delay 1s -e go,html,yaml --ignore web/app/ --signal SIGTERM --exec 'go run ./cmd/ncDocConverth || exit 1'

@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<d:searchrequest xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns">
	<d:basicsearch>
		<d:select>
			<d:prop>
				<d:getcontenttype/>
				<d:getlastmodified/>
				<oc:size/>
				<oc:fileid/>
			</d:prop>
		</d:select>
		<d:from>
			<d:scope>
				<d:href>/files/{{.Username}}/{{.Directory}}</d:href>
				<d:depth></d:depth>
			</d:scope>
		</d:from>
		<d:where>
			<d:and>
				<d:or>
					{{range .ContentType}}
					<d:eq>
						<d:prop>
							<d:getcontenttype/>
						</d:prop>
						<d:literal>{{ . }}</d:literal>
					</d:eq>
					{{end}}
				</d:or>
				<d:gt>
					<d:prop>
						<oc:size/>
					</d:prop>
					<d:literal>100</d:literal>
				</d:gt>
			</d:and>
		</d:where>
		<d:orderby/>
	</d:basicsearch>
</d:searchrequest>

@ -0,0 +1,8 @@
package web

import (
	"embed"
)

//go:embed "apitemplate"
var ApiTemplateFiles embed.FS
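
The embedded apitemplate directory holds the WebDAV search body shown above. A minimal sketch of how it might be rendered before sending the SEARCH request — `buildSearchBody` and the file name "searchTemplate.xml" are assumptions for illustration:

	// Hypothetical rendering of the embedded search template
	// (imports: bytes, text/template, rpjosh.de/ncDocConverter/web)
	func buildSearchBody(username, directory string, contentTypes []string) (string, error) {
		params := struct {
			Username    string
			Directory   string
			ContentType []string
		}{username, directory, contentTypes}

		tmpl, err := template.ParseFS(web.ApiTemplateFiles, "apitemplate/*.xml")
		if err != nil {
			return "", err
		}

		var body bytes.Buffer
		// "searchTemplate.xml" is an assumed file name inside the embedded directory
		if err := tmpl.ExecuteTemplate(&body, "searchTemplate.xml", params); err != nil {
			return "", err
		}
		return body.String(), nil
	}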