Squashed commit of the following:

commit 1eff2d7d67
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 18 19:15:27 2023 +1100

    Update PR

    * Add --source-directory 'path/as/source/' --destination-directory 'path/as/destination' functionality

commit ad3ddee0ec
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 18 17:32:24 2023 +1100

    Update PR

    * Add --create-directory
    * Add --remove-directory

commit 7dfe6b65b7
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 18 12:27:03 2023 +1100

    Update PR

    * Update PR

commit 75c071e56f
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 18 10:12:05 2023 +1100

    Update PR

    * Update PR

commit 6db484cdad
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Mon Oct 16 17:01:25 2023 +1100

    Update PR

    * Update PR

commit d893ea5460
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 11 10:43:50 2023 +1100

    Update PR

    * Update PR

commit 82bd593bf4
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 11 09:14:17 2023 +1100

    Update PR

    * Validate and document --auth-files operation

commit c551203f4c
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Wed Oct 11 05:48:22 2023 +1100

    Update PR

    * Add --create-share-link

commit fbf63999ff
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Tue Oct 10 18:39:21 2023 +1100

    Update PR

    * Update PR

commit 72a4680035
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Tue Oct 10 17:43:15 2023 +1100

    Update PR

    * Add --get-file-link
    * Add --modified-by

commit 0d3fc3ebf2
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Tue Oct 10 14:28:10 2023 +1100

    Add --display-sync-status

    * Add --display-sync-status

commit 1f183ca03e
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Mon Oct 9 08:18:13 2023 +1100

    Update PR

    * Update PR with doc updates

commit b0628d7099
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Sun Oct 8 10:52:52 2023 +1100

    Update PR

    * Update PR

commit 7e3df956ce
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Sat Oct 7 05:31:26 2023 +1100

    Update PR

    * Update PR

commit c69f2abc4b
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Sat Oct 7 05:28:28 2023 +1100

    Update PR

    * Update PR

commit ea1ca33374
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Fri Oct 6 14:57:51 2023 +1100

    Update PR

    * Update PR

commit 1503f969df
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Fri Oct 6 09:19:04 2023 +1100

    Update PR

    * Update PR

commit 5127464f2c
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Fri Oct 6 06:48:20 2023 +1100

    Change when the integrity check is performed

    * Change when the integrity check is performed

commit c7cc45d95c
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Thu Oct 5 19:40:05 2023 +1100

    Update maxInotifyWatches location

    * Update maxInotifyWatches location

commit c44ad963a6
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Thu Oct 5 17:41:31 2023 +1100

    Update main.d

    * Fix --version segfault

commit 51f0ffcb1f
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Thu Oct 5 17:24:30 2023 +1100

    Uplift to v2.5.0-alpha-2

    * Uplift to v2.5.0-alpha-2

commit cbe3e6ea84
Author: abraunegg <alex.braunegg@gmail.com>
Date:   Thu Oct 5 17:17:26 2023 +1100

    Clean up before onedrive-v2.5.0-alpha-2

    * Clean up before onedrive-v2.5.0-alpha-2
Author: abraunegg
Date:   2023-10-19 05:31:50 +11:00
Parent: 4a60654e3f
Commit: 4253318835

29 changed files with 13002 additions and 12936 deletions


@@ -55,7 +55,7 @@ endif
system_unit_files = contrib/systemd/onedrive@.service
user_unit_files = contrib/systemd/onedrive.service
DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md
DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-folders.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md
ifneq ("$(wildcard /etc/redhat-release)","")
RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l)
@@ -66,20 +66,19 @@ RHEL_VERSION = 0
endif
SOURCES = \
src/config.d \
src/itemdb.d \
src/log.d \
src/main.d \
src/monitor.d \
src/onedrive.d \
src/qxor.d \
src/selective.d \
src/sqlite.d \
src/sync.d \
src/upload.d \
src/config.d \
src/log.d \
src/util.d \
src/qxor.d \
src/curlEngine.d \
src/onedrive.d \
src/sync.d \
src/itemdb.d \
src/sqlite.d \
src/clientSideFiltering.d \
src/progress.d \
src/arsd/cgi.d
src/monitor.d
ifeq ($(NOTIFICATIONS),yes)
SOURCES += src/notifications/notify.d src/notifications/dnotify.d


@@ -2,6 +2,13 @@
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 2.5.0 - TBA
### Changed
* Renamed various documentation files to align with document content
## 2.4.25 - 2023-06-21
### Fixed
* Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue)

configure (vendored, 20 changes)

@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for onedrive v2.4.25.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-alpha-2.
#
# Report bugs to <https://github.com/abraunegg/onedrive>.
#
@@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='onedrive'
PACKAGE_TARNAME='onedrive'
PACKAGE_VERSION='v2.4.25'
PACKAGE_STRING='onedrive v2.4.25'
PACKAGE_VERSION='v2.5.0-alpha-2'
PACKAGE_STRING='onedrive v2.5.0-alpha-2'
PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
PACKAGE_URL=''
@@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems.
\`configure' configures onedrive v2.5.0-alpha-2 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1280,7 +1280,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of onedrive v2.4.25:";;
short | recursive ) echo "Configuration of onedrive v2.5.0-alpha-2:";;
esac
cat <<\_ACEOF
@@ -1393,7 +1393,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
onedrive configure v2.4.25
onedrive configure v2.5.0-alpha-2
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by onedrive $as_me v2.4.25, which was
It was created by onedrive $as_me v2.5.0-alpha-2, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2162,7 +2162,7 @@ fi
PACKAGE_DATE="June 2023"
PACKAGE_DATE="October 2023"
@@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by onedrive $as_me v2.4.25, which was
This file was extended by onedrive $as_me v2.5.0-alpha-2, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -3212,7 +3212,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
onedrive config.status v2.4.25
onedrive config.status v2.5.0-alpha-2
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"


@@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
dnl - tag the release
AC_PREREQ([2.69])
AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive])
AC_INIT([onedrive],[v2.5.0-alpha-2], [https://github.com/abraunegg/onedrive], [onedrive])
AC_CONFIG_SRCDIR([src/main.d])


@@ -1,192 +0,0 @@
# How to configure OneDrive Business Shared Folder Sync
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Process Overview
Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
1. List available shared folders to determine which folder you wish to sync & to validate that you have access to that folder
2. Create a new file called 'business_shared_folders' in your config directory which contains a list of the shared folders you wish to sync
3. Test the configuration using '--dry-run'
4. Sync the OneDrive Business Shared folders as required
## Listing available OneDrive Business Shared Folders
List the available OneDrive Business Shared folders with the following command:
```text
onedrive --list-shared-folders
```
This will return a listing of all OneDrive Business Shared folders which have been shared with you and by whom. This is important for conflict resolution:
```text
Initializing the Synchronization Engine ...
Listing available OneDrive Business Shared Folders:
---------------------------------------
Shared Folder: SharedFolder0
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder1
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder2
Shared By: Firstname Lastname
---------------------------------------
Shared Folder: SharedFolder0
Shared By: Firstname Lastname (user@domain)
---------------------------------------
Shared Folder: SharedFolder1
Shared By: Firstname Lastname (user@domain)
---------------------------------------
Shared Folder: SharedFolder2
Shared By: Firstname Lastname (user@domain)
...
```
## Configuring OneDrive Business Shared Folders
1. Create a new file called 'business_shared_folders' in your config directory
2. On each new line, list the OneDrive Business Shared Folder you wish to sync
```text
[alex@centos7full onedrive]$ cat ~/.config/onedrive/business_shared_folders
# comment
Child Shared Folder
# Another comment
Top Level to Share
[alex@centos7full onedrive]$
```
3. Validate your configuration with `onedrive --display-config`:
```text
Configuration file successfully loaded
onedrive version = v2.4.3
Config path = /home/alex/.config/onedrive-business/
Config file found in config path = true
Config option 'check_nosync' = false
Config option 'sync_dir' = /home/alex/OneDriveBusiness
Config option 'skip_dir' =
Config option 'skip_file' = ~*|.~*|*.tmp
Config option 'skip_dotfiles' = false
Config option 'skip_symlinks' = false
Config option 'monitor_interval' = 300
Config option 'min_notify_changes' = 5
Config option 'log_dir' = /var/log/onedrive/
Config option 'classify_as_big_delete' = 1000
Config option 'sync_root_files' = false
Selective sync 'sync_list' configured = false
Business Shared Folders configured = true
business_shared_folders contents:
# comment
Child Shared Folder
# Another comment
Top Level to Share
```
## Performing a sync of OneDrive Business Shared Folders
Perform a standalone sync using the following command:
```text
onedrive --synchronize --sync-shared-folders --verbose
Using 'user' Config Dir: /home/alex/.config/onedrive-business/
Using 'system' Config Dir:
Configuration file successfully loaded
Initializing the OneDrive API ...
Configuring Global Azure AD Endpoints
Opening the item database ...
All operations will be performed in: /home/alex/OneDriveBusiness
Application version: v2.4.3
Account Type: business
Default Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
Default Root ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Remaining Free Space: 1098316220277
Fetching details for OneDrive Root
OneDrive Root exists in the database
Initializing the Synchronization Engine ...
Syncing changes from OneDrive ...
Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Number of items from OneDrive to process: 0
Attempting to sync OneDrive Business Shared Folders
Syncing this OneDrive Business Shared Folder: Child Shared Folder
OneDrive Business Shared Folder - Shared By: test user
Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
Processing 11 OneDrive items to ensure consistent local state
Syncing this OneDrive Business Shared Folder: Top Level to Share
OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com)
Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
Processing 31 OneDrive items to ensure consistent local state
Uploading differences of ~/OneDriveBusiness
Processing root
The directory has not changed
Processing SMPP_Local
The directory has not changed
Processing SMPP-IF-SPEC_v3_3-24858.pdf
The file has not changed
Processing SMPP_v3_4_Issue1_2-24857.pdf
The file has not changed
Processing new_local_file.txt
The file has not changed
Processing root
The directory has not changed
...
The directory has not changed
Processing week02-03-Combinational_Logic-v1.pptx
The file has not changed
Uploading new items of ~/OneDriveBusiness
Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ
Number of items from OneDrive to process: 0
Attempting to sync OneDrive Business Shared Folders
Syncing this OneDrive Business Shared Folder: Child Shared Folder
OneDrive Business Shared Folder - Shared By: test user
Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared
Processing 11 OneDrive items to ensure consistent local state
Syncing this OneDrive Business Shared Folder: Top Level to Share
OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com)
Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA
Adding OneDrive root details for processing
Adding OneDrive folder details for processing
Adding 4 OneDrive items for processing from OneDrive folder
Adding 3 OneDrive items for processing from /Top Level to Share/10-Files
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG
Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG
Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP
Processing 31 OneDrive items to ensure consistent local state
```
**Note:** Whenever you modify the `business_shared_folders` file, you must perform a `--resync` of your database to clean up stale entries due to changes in your configuration.
## Enable / Disable syncing of OneDrive Business Shared Folders
Performing a sync of the configured OneDrive Business Shared Folders can be enabled or disabled by adding the following to your configuration file.
### Enable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_folders = "true"
```
### Disable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_folders = "false"
```
## Known Issues
Shared folders shared with you by people outside of your 'organisation' cannot be synced, because the Microsoft Graph API does not present these folders.
Shared folders that match this scenario will display a 'world' symbol when you view 'Shared' via OneDrive online, as per below:
![shared_with_me](./images/shared_with_me.JPG)
This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,40 @@
# How to configure OneDrive Business Shared Folder Sync
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Important Note
This feature has been 100% re-written from v2.5.0 onwards. A prerequisite before using this capability in v2.5.0 and above is to revert any Shared Business Folder configuration you may currently be using, including, but not limited to:
* Removing `sync_business_shared_folders = "true|false"` from your 'config' file
* Removing the 'business_shared_folders' file
* Removing any local data and shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues.
## Process Overview
Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client:
1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you.
2. Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder
3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement.
4. Test the configuration using '--dry-run'
5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required
**NOTE:** This documentation will be updated as this feature progresses.
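Putting steps 3 to 5 together, a minimal sketch (assuming the default config location of `~/.config/onedrive/config`):
```bash
# Enable the feature (step 3) - this triggers a --resync requirement
echo 'sync_business_shared_items = "true"' >> ~/.config/onedrive/config
# Test the configuration first (step 4)
onedrive --sync --resync --dry-run
# Then perform the real sync (step 5)
onedrive --sync --resync
```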
### Enable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_items = "true"
```
### Disable syncing of OneDrive Business Shared Folders via config file
```text
sync_business_shared_items = "false"
```
## Known Issues
Shared folders shared with you by people outside of your 'organisation' cannot be synced, because the Microsoft Graph API does not present these folders.
Shared folders that match this scenario will display a 'world' symbol when you view 'Shared' via OneDrive online, as per below:
![shared_with_me](./images/shared_with_me.JPG)
This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966)


@@ -66,7 +66,7 @@ ROOT level privileges prohibited!
### 3. First run
The 'onedrive' client within the Docker container needs to be authorized with your Microsoft account. This is achieved by initially running docker in interactive mode.
Run the docker image with the commands below and make sure to change `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `"/home/abraunegg/OneDrive"`).
Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
```bash
export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
mkdir -p ${ONEDRIVE_DATA_DIR}
@@ -168,7 +168,7 @@ docker volume inspect onedrive_conf
Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration)
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
### 7. Sync multiple accounts
There are many ways to do this; the easiest is probably to
@@ -211,7 +211,7 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |


@@ -211,8 +211,10 @@ sudo make install
```
### Build options
Notifications can be enabled using the `configure` switch `--enable-notifications`.
#### GUI Notification Support
GUI notification support can be enabled using the `configure` switch `--enable-notifications`.
#### systemd service directory customisation support
Systemd service files are installed in the appropriate directories on the system,
as provided by the `pkg-config systemd` settings. If the deduced paths need to be
overridden, the two options `--with-systemdsystemunitdir` (for
@@ -220,9 +222,11 @@ the Systemd system unit location), and `--with-systemduserunitdir` (for the
Systemd user unit location) can be specified. Passing `no` to one of these
options disables service file installation.
#### Additional Compiler Debug
By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug
information, useful (for example) to get `perf`-issued figures.
#### Shell Completion Support
By passing `--enable-completions` to the `configure` call, shell completion functions are
installed for `bash`, `zsh` and `fish`. The installation directories are determined
as far as possible automatically, but can be overridden by passing


@@ -59,7 +59,7 @@ This will create a podman volume labeled `onedrive_conf`, where all configuratio
### 2. First run
The 'onedrive' client within the container needs to be authorized with your Microsoft account. This is achieved by initially running podman in interactive mode.
Run the podman image with the commands below and make sure to change `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `"/home/abraunegg/OneDrive"`).
Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
It is a requirement that the container be run using a non-root UID and GID; you must supply a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`).
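One common way to supply your own IDs is to reuse your current user's UID and GID (a sketch using the standard `id` utility):
```bash
# Use the invoking user's UID and GID for the container
export ONEDRIVE_UID=$(id -u)
export ONEDRIVE_GID=$(id -g)
```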
@@ -188,7 +188,7 @@ podman volume inspect onedrive_conf
```
Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first.
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration)
The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration)
### 7. Sync multiple accounts
There are many ways to do this; the easiest is probably to
@@ -219,7 +219,7 @@ podman run -it --restart unless-stopped --name onedrive_work \
| <B>ONEDRIVE_LOGOUT</B> | Controls "--logout" switch. Default is 0 | 1 |
| <B>ONEDRIVE_REAUTH</B> | Controls "--reauth" switch. Default is 0 | 1 |
| <B>ONEDRIVE_AUTHFILES</B> | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_AUTHRESPONSE</B> | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) |
| <B>ONEDRIVE_DISPLAY_CONFIG</B> | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 |
| <B>ONEDRIVE_SINGLE_DIRECTORY</B> | Controls "--single-directory" option. Default = "" | "mydir" |

docs/usage.md (new file, 900 lines)

@@ -0,0 +1,900 @@
# Using the OneDrive Client for Linux
## Application Version
Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required.
## Table of Contents
- [Important Notes](#important-notes)
- [Upgrading from the 'skilion' Client](#upgrading-from-the-skilion-client)
- [Naming Conventions for Local Files and Folders](#naming-conventions-for-local-files-and-folders)
- [Compatibility with curl](#compatibility-with-curl)
- [First Steps](#first-steps)
- [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account)
- [Display Your Applicable Runtime Configuration](#display-your-applicable-runtime-configuration)
- [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes)
- [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode)
- [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode)
- [Increasing application logging level](#increasing-application-logging-level)
- [Testing your configuration](#testing-your-configuration)
- [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive)
- [Performing a single directory synchronisation with Microsoft OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive)
- [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive)
- [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive)
- [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file)
- [Performing a --resync](#performing-a-resync)
- [Performing a --force-sync without a --resync or changing your configuration](#performing-a-force-sync-without-a-resync-or-changing-your-configuration)
- [Enabling the Client Activity Log](#enabling-the-client-activity-log)
- [Client Activity Log Example:](#client-activity-log-example)
- [Client Activity Log Differences](#client-activity-log-differences)
- [GUI Notifications](#gui-notifications)
- [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change)
- [Determining the synchronisation result](#determining-the-synchronisation-result)
- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions)
- [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client)
- [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored)
- [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive)
- [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory)
- [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing)
- [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing)
- [How to 'skip' dot files and folders from syncing?](#how-to-skip-dot-files-and-folders-from-syncing)
- [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing)
- [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations)
- [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up)
- [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links)
- [How to synchronise shared folders (OneDrive Personal)?](#how-to-synchronise-shared-folders-onedrive-personal)
- [How to synchronise shared folders (OneDrive Business or Office 365)?](#how-to-synchronise-shared-folders-onedrive-business-or-office-365)
- [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries)
- [How to Create a Shareable Link?](#how-to-create-a-shareable-link)
- [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once)
- [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously)
- [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period)
- [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service)
- [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd)
- [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora)
- [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux)
- [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions)
- [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora)
- [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void)
- [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login)
## Important Notes
### Upgrading from the 'skilion' Client
The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system.
Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below:
**Invalid 'skilion' configuration:**
```text
skip_file = ".*|~*"
```
**Minimum valid configuration:**
```text
skip_file = "~*"
```
**Default valid configuration:**
```text
skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
```
Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed:
```text
ERROR: Invalid skip_file entry '.*' detected
```
### Naming Conventions for Local Files and Folders
In the synchronisation directory, it's crucial to adhere to the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders.
If you happen to have two files with the same names but different capitalisation, our application will make an effort to handle it. However, when there's a clash within the namespace, the file causing the conflict won't be synchronised. Please note that this behaviour is intentional and won't be addressed.
### Compatibility with curl
If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1.
For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well.
However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client.
If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution.
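For example, to pin the client to HTTP/1.1 via the configuration file (assuming the default config location):
```text
# In ~/.config/onedrive/config - force HTTP/1.1 for all client operations
force_http_11 = "true"
```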
## First Steps
### Authorise the Application with Your Microsoft OneDrive Account
Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches.
Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department.
When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application.
**Example:**
```text
[user@hostname ~]$ onedrive
Authorise this app by visiting:
https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code=<redacted>
The application has been successfully authorised, but no additional command switches were provided.
Please use 'onedrive --help' for further assistance on how to run this application.
```
### Display Your Applicable Runtime Configuration
To verify the configuration that the application will use, use the following command:
```text
onedrive --display-config
```
This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows:
```text
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
onedrive version = vX.Y.Z-A-bcdefghi
Config path = /home/user/.config/onedrive
Config file found in config path = true
Config option 'drive_id' =
Config option 'sync_dir' = ~/OneDrive
...
Config option 'webhook_enabled' = false
```
### Understanding OneDrive Client for Linux Operational Modes
There are two modes of operation when using the client:
1. Standalone sync mode that performs a single sync action against Microsoft OneDrive.
2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive.
#### Standalone Synchronisation Operational Mode (Standalone Mode)
This method of use can be employed by issuing the following option to the client:
```text
onedrive --sync
```
For simplicity, this can be reduced to the following:
```text
onedrive -s
```
#### Ongoing Synchronisation Operational Mode (Monitor Mode)
This method of use can be utilised by issuing the following option to the client:
```text
onedrive --monitor
```
For simplicity, this can be shortened to the following:
```text
onedrive -m
```
**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background.
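As a sketch, on a system where the bundled user unit file has been installed (an assumption; refer to the installation documentation for your platform), monitor mode can be run as a background service with:
```bash
# Enable and start the bundled user-level service, which runs the client in monitor mode
systemctl --user enable --now onedrive
```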
Two common errors can occur when using monitor mode:
* Initialisation failure
* Unable to add a new inotify watch
Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low:
* `fs.file-max`
* `fs.inotify.max_user_watches`
To determine what the existing values are on your system, use the following commands:
```text
sysctl fs.file-max
sysctl fs.inotify.max_user_watches
```
Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are:
```text
...
All application operations will be performed in: /home/user/OneDrive
OneDrive synchronisation interval (seconds): 300
Maximum allowed open files: 393370 <-- This is the fs.file-max value
Maximum allowed inotify watches: 29374 <-- This is the fs.inotify.max_user_watches value
Initialising filesystem inotify monitoring ...
...
```
To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir':
```text
cd /path/to/your/sync/dir
ls -laR | wc -l
```
To make a change to these variables using your file and folder count, use the following process:
```text
sudo sysctl fs.file-max=<new_value>
sudo sysctl fs.inotify.max_user_watches=<new_value>
```
Once these values are changed, you will need to restart your client so that the new values are detected and used.
To make these changes permanent on your system, refer to your OS reference documentation.
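As a sketch, on distributions that read `/etc/sysctl.d/` (an assumption; the file name and values below are examples only):
```bash
# Persist the increased limits across reboots - substitute your calculated values
echo "fs.file-max=393370" | sudo tee /etc/sysctl.d/90-onedrive.conf
echo "fs.inotify.max_user_watches=524288" | sudo tee -a /etc/sysctl.d/90-onedrive.conf
# Apply all sysctl configuration files immediately
sudo sysctl --system
```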
### Increasing application logging level
When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be:
```text
onedrive --sync --verbose
```
For simplicity, this can be shortened to the following:
```
onedrive -s -v
```
Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem.
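For example, to enable debug output for a standalone sync:
```text
onedrive --sync --verbose --verbose
```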
### Testing your configuration
You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example:
```text
onedrive --sync --verbose --dry-run
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
Using 'user' Config Dir: /home/user/.config/onedrive
DRY-RUN Configured. Output below shows what 'would' have occurred.
DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations
DRY RUN: Not creating backup config file as --dry-run has been used
DRY RUN: Not updating hash files as --dry-run has been used
Checking Application Version ...
Attempting to initialise the OneDrive API ...
Configuring Global Azure AD Endpoints
The OneDrive API was initialised successfully
Opening the item database ...
Sync Engine Initialised with new Onedrive API instance
Application version: vX.Y.Z-A-bcdefghi
Account Type: <account-type>
Default Drive ID: <drive-id>
Default Root ID: <root-id>
Remaining Free Space: 1058488129 KB
All application operations will be performed in: /home/user/OneDrive
Fetching items from the OneDrive API for Drive ID: <drive-id> ..
...
Performing a database consistency and integrity check on locally stored data ...
Processing DB entries for this Drive ID: <drive-id>
Processing ~/OneDrive
The directory has not changed
...
Scanning local filesystem '~/OneDrive' for new data to upload ...
...
Performing a final true-up scan of online data from Microsoft OneDrive
Fetching items from the OneDrive API for Drive ID: <drive-id> ..
Sync with Microsoft OneDrive is complete
```
### Performing a sync with Microsoft OneDrive
By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option.
After authorising the application, a sync of your data can be performed by running:
```text
onedrive --sync
```
This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location.
If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command:
```text
onedrive --sync --local-first
```
### Performing a single directory synchronisation with Microsoft OneDrive
In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. To do this, use the following command:
```text
onedrive --sync --single-directory '<dir_name>'
```
**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'`
### Performing a 'one-way' download synchronisation with Microsoft OneDrive
In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command:
```text
onedrive --sync --download-only
```
This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed.
However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command:
```text
onedrive --sync --download-only --cleanup-local-files
```
### Performing a 'one-way' upload synchronisation with Microsoft OneDrive
In some cases, it may be desirable to 'upload only' to Microsoft OneDrive. To do this, use the following command:
```text
onedrive --sync --upload-only
```
**Note:** If a file or folder is present on Microsoft OneDrive, which was previously synchronised and now does not exist locally, that item will be removed from Microsoft OneDrive online. If the data on Microsoft OneDrive should be kept, the following should be used:
```text
onedrive --sync --upload-only --no-remote-delete
```
**Note:** The operation of 'upload only' does not request data from Microsoft OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online.
### Performing a selective synchronisation via 'sync_list' file
Selective synchronisation allows you to sync only specific files and directories.
To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`).
Important points to understand before using 'sync_list':
* 'sync_list' excludes _everything_ by default on OneDrive.
* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**.
* Order exclusions before inclusions, so that anything _specifically included_ is included.
* How and where you place your `/` matters for excludes and includes in subdirectories.
Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations.
Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns.
Here is an example of `sync_list`:
```text
# sync_list supports comments
#
# The ordering of entries is highly recommended - exclusions before inclusions
#
# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive
!Documents/temp*
#
# Exclude secret data folder in root directory only
!/Secret_data/*
#
# Include everything else in root directory
/*
#
# Include my Backup folder(s) or file(s) anywhere on OneDrive
Backup
#
# Include my Backup folder in root
/Backup/
#
# Include Documents folder(s) anywhere in OneDrive
Documents/
#
# Include all PDF files in Documents folder(s), anywhere in OneDrive
Documents/*.pdf
#
# Include this single document in Documents folder(s), anywhere in OneDrive
Documents/latest_report.docx
#
# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive
Work/Project*
#
# Include all "notes.txt" files, anywhere in OneDrive
notes.txt
#
# Include /Blender in the ~OneDrive root but not if elsewhere in OneDrive
/Blender
#
# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name
Pictures/Camera Roll
Pictures/Saved Pictures
#
# Include these names if they match any file or folder
Cinema Soc
Codes
Textbooks
Year 2
```
The following are supported for pattern matching and exclusion rules:
* Use the `*` to wildcard select any characters to match for the item to be included
* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item
**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement.
**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync`
**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file:
```text
sync_root_files = "true"
```
This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file.
### Performing a --resync
If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration:
* drive_id
* sync_dir
* skip_file
* skip_dir
* skip_dotfiles
* skip_symlinks
* sync_business_shared_items
* Creating, Modifying or Deleting the 'sync_list' file
Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`.
When you use `--resync`, you'll encounter the following warning and advice:
```text
Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.'
This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss.
If in doubt, back up your local data before using --resync.
Are you sure you want to proceed with --resync? [Y/N]
```
To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue.
**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option.
**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt.
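For example, in a scripted or automated run:
```text
onedrive --sync --resync --resync-auth
```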
### Performing a --force-sync without a --resync or changing your configuration
In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice.
The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`.
To use this option, you must run the application manually in the following manner:
```text
onedrive --sync --single-directory '<directory_to_sync>' --force-sync <add any other options needed or required>
```
When using `--force-sync`, you'll encounter the following warning and advice:
```text
WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used
Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts.
By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync.
Are you sure you want to proceed with --force-sync [Y/N]
```
To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue.
### Enabling the Client Activity Log
When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client, allowing easy identification of which user's activity each log belongs to.
**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed:
```text
ERROR: Unable to access /var/log/onedrive
ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access
ERROR: The requested client activity log will instead be located in your user's home directory
```
On many systems, this can be achieved by performing the following:
```text
sudo mkdir /var/log/onedrive
sudo chown root:users /var/log/onedrive
sudo chmod 0775 /var/log/onedrive
```
Additionally, you need to ensure that your user account is part of the 'users' group:
```
cat /etc/group | grep users
```
If your user is not part of this group, then you need to add your user to this group:
```
sudo usermod -a -G users <your-user-name>
```
If you need to make a group modification, you will need to log out of all sessions (including SSH sessions) and log in again for the new group membership to be applied.
If the client is unable to write the client activity log, the following error message will be printed:
```text
ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log
ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account
ERROR: The requested client activity log will instead be located in your user's home directory
```
If you receive this error message, you will need to diagnose why your system cannot write to the specified file location.
#### Client Activity Log Example:
An example of a client activity log for the command `onedrive --sync --enable-logging` is below:
```text
2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints
2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance
2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive
2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ...
2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ...
2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ...
2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive
2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ...
2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete
```
An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below:
```text
2023-Sep-27 08:20:05.4600464 Checking Application Version ...
2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ...
2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints
2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully
2023-Sep-27 08:20:05.5238536 Opening the item database ...
2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance
2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi
2023-Sep-27 08:20:05.9227079 Account Type: <account-type>
2023-Sep-27 08:20:05.9227360 Default Drive ID: <redacted>
2023-Sep-27 08:20:05.9227550 Default Root ID: <redacted>
2023-Sep-27 08:20:05.9227862 Remaining Free Space: <space-available>
2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive
2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ...
2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: <redacted>
2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive
2023-Sep-27 08:20:06.2078739 The directory has not changed
2023-Sep-27 08:20:06.2079783 Processing Attachments
2023-Sep-27 08:20:06.2080071 The directory has not changed
2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx
2023-Sep-27 08:20:06.2082079 The file has not changed
2023-Sep-27 08:20:06.2082760 Processing Documents
2023-Sep-27 08:20:06.2083225 The directory has not changed
2023-Sep-27 08:20:06.2084284 Processing Documents/file.log
2023-Sep-27 08:20:06.2084886 The file has not changed
2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ...
2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files
2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive
2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: <redacted>
2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete
```
#### Client Activity Log Differences
Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output:
**No user configuration file:**
```text
No user or system config file found, using application defaults
Using 'user' configuration path for application state data: /home/user/.config/onedrive
Using the following path to store the runtime application log: /var/log/onedrive
```
**User configuration file:**
```text
Reading configuration file: /home/user/.config/onedrive/config
Configuration file successfully loaded
Using 'user' configuration path for application state data: /home/user/.config/onedrive
Using the following path to store the runtime application log: /var/log/onedrive
```
### GUI Notifications
If notification support has been compiled in (refer to the GUI Notification Support section in [install.md](install.md)), the following events will trigger a GUI notification within the display manager session:
* Aborting a sync if .nosync file is found
* Skipping a particular item due to an invalid name
* Skipping a particular item due to an invalid symbolic link
* Skipping a particular item due to an invalid UTF sequence
* Skipping a particular item due to an invalid character encoding sequence
* Cannot create remote directory
* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length)
* Cannot delete remote file / folder
* Cannot move remote file / folder
* When a re-authentication is required
* When a new client version is available
* Files that fail to upload
* Files that fail to download
### Handling a Microsoft OneDrive Account Password Change
If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run:
```text
AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '<date-and-timestamp>' and the TokensValidFrom date (before which tokens are not valid) for this user is '<date-and-timestamp>'.
ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token.
```
To re-authorise the client, follow the steps below:
1. If running the client as a system service (init.d or systemd), stop the applicable system service
2. Run the command `onedrive --reauth`. This will clean up the previous authorisation and prompt you to re-authorise the client as per the initial configuration. Please note: if you are using `--confdir` as part of your application runtime configuration, you must include it when telling the client to re-authenticate (see the example below).
3. Restart the client if running as a system service or perform the standalone sync operation again
The application will now sync with Microsoft OneDrive using the new credentials.
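**Example:** If a custom configuration directory was used when the client was first authorised, a hypothetical re-authentication command (the `--confdir` path shown is illustrative only) would be:
```text
onedrive --confdir="~/.config/onedrive-personal" --reauth
```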
### Determining the synchronisation result
When the client has finished syncing without errors, the following will be displayed:
```text
Sync with Microsoft OneDrive is complete
```
If any items failed to sync, the following will be displayed:
```text
Sync with Microsoft OneDrive has completed, however there are items that failed to sync.
```
A list of the files that failed to upload or download will then be displayed, allowing you to determine your next steps.
To fix the upload or download failures, you may need to retry your command and perform a resync to ensure your system is correctly synced with your Microsoft OneDrive account.
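**Example:** A retry of a standalone sync, including a resync, would be performed as follows:
```text
onedrive --sync --resync
```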
## Frequently Asked Configuration Questions
### How to change the default configuration of the client?
Configuration is determined by three layers, and applied in the following order:
* Application default values
* Values that are set in the configuration file
* Values that are passed in via the command line at application runtime. These values will override any value set in the configuration file.
The default application values provide a reasonable operational default, and additional configuration is entirely optional.
If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are:
* `~/.config/onedrive`
* `/etc/onedrive`
**Example:** To download a copy of the config file, use the following:
```text
mkdir -p ~/.config/onedrive
wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config
```
For full configuration options and CLI switches, please refer to [./application-config-options.md](application-config-options.md)
### How to change where my data from Microsoft OneDrive is stored?
By default, your Microsoft OneDrive data is stored within your home directory, under a directory called 'OneDrive'. This replicates, as closely as possible, where the Microsoft Windows OneDrive client stores data.
To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored.
**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix.
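**Example:** A hypothetical 'config' entry to store your data under `/data/OneDrive` (an illustrative path) would be:
```text
sync_dir = "/data/OneDrive"
```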
### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?
The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive:
* Directories: 700 - This provides the following permissions: `drwx------`
* Files: 600 - This provides the following permissions: `-rw-------`
These default permissions align with the security principle of 'least privilege', so that only you have access to the data that you download from Microsoft OneDrive.
To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions.
```text
sync_dir_permissions = "700"
sync_file_permissions = "600"
```
**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only.
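**Example:** As an illustration, the following 'config' entries would additionally grant 'group' and 'other' users read access to downloaded directories and files:
```text
sync_dir_permissions = "755"
sync_file_permissions = "644"
```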
### How to only sync a specific directory?
There are two methods to achieve this:
* Employ the '--single-directory' option to only sync this specific path (see the example below)
* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded
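**Example:** To perform a one-off sync of only the 'Documents' directory (an illustrative path), the command would be:
```text
onedrive --sync --single-directory 'Documents'
```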
### How to 'skip' files from syncing?
There are two methods to achieve this:
* Employ 'skip_file' as part of your 'config' file to configure what files to skip (see the example below)
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
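**Example:** A hypothetical 'skip_file' entry, using '|' to separate each pattern, to skip temporary and backup files might look like this:
```text
skip_file = "~*|.~*|*.tmp|*.bak"
```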
### How to 'skip' directories from syncing?
There are three methods available to 'skip' a directory from the sync process:
* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip (see the example below)
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory
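**Example:** Hypothetical 'config' entries to skip two illustrative directory names and to enable '.nosync' handling would be:
```text
skip_dir = "Desktop|Scratch"
check_nosync = "true"
```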
### How to 'skip' dot files and folders from syncing?
There are three methods to achieve this:
* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip
* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded
* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive (see the example below)
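**Example:** To enable the skipping of all dot files and folders, the 'config' entry would be:
```text
skip_dotfiles = "true"
```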
### How to 'skip' files larger than a certain size from syncing?
Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped.
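**Example:** A hypothetical 'config' entry to skip any file larger than 500 MB would be:
```text
skip_size = "500"
```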
### How to 'rate limit' the application to control bandwidth consumed for upload & download operations?
To minimise the Internet bandwidth for upload and download operations, you can add the 'rate_limit' configuration option as part of your 'config' file.
The default value is '0' which means use all available bandwidth for the application.
The value being used can be reviewed when using `--display-config`.
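**Example:** A hypothetical 'config' entry to limit transfers to approximately 1 MB/s (the value is interpreted as bytes per second) would be:
```text
rate_limit = "1048576"
```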
### How can I prevent my local disk from filling up?
By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space.
This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file.
You can review the value being used when using `--display-config`.
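**Example:** A hypothetical 'config' entry to reserve 1024 MB of free disk space would be:
```text
space_reservation = "1024"
```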
### How does the client handle symbolic links?
Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
As such, there are only two methods to support symbolic links with this client:
1. Follow the Linux symbolic link and upload the data it points to, to Microsoft OneDrive. This is the default behaviour.
2. Skip symbolic links by configuring the application to do so. When skipping, no data, link, or reference is uploaded to OneDrive.
Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing.
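**Example:** To skip all symbolic links while syncing, the 'config' entry would be:
```text
skip_symlinks = "true"
```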
### How to synchronise shared folders (OneDrive Personal)?
Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
### How to synchronise shared folders (OneDrive Business or Office 365)?
Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
Refer to [./business-shared-folders.md](business-shared-folders.md) for further details.
### How to synchronise SharePoint / Office 365 Shared Libraries?
There are two methods to achieve this:
* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive".
* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance.
### How to Create a Shareable Link?
In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file.
To accomplish this, employ the following command:
```text
onedrive --create-share-link <path/to/file>
```
**Note:** By default, the access permissions for the file link will be read-only.
To make it a read-write link, execute the following command:
```text
onedrive --create-share-link <path/to/file> --with-editing-perms
```
**Note:** The order of the file path and option flag is crucial.
### How to Synchronise Both Personal and Business Accounts at once?
You need to set up separate instances of the application configuration for each account.
Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration.
### How to Synchronise Multiple SharePoint Libraries simultaneously?
For each SharePoint Library, configure a separate instance of the application configuration.
Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions.
### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?
When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates.
Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle.
This is accomplished by:
* Using 'webhook_enabled' as part of your 'config' file to enable this feature
* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates (see the example below)
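**Example:** Hypothetical 'config' entries to enable webhook support (the URL shown is illustrative only and must be reachable by Microsoft OneDrive) would be:
```text
webhook_enabled = "true"
webhook_public_url = "https://example.com/onedrive-webhook"
```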
### How to initiate the client as a background service?
There are a few ways to employ onedrive as a service:
* via init.d
* via systemd
* via runit
#### OneDrive service running as root user via init.d
```text
chkconfig onedrive on
service onedrive start
```
To view the logs, execute:
```text
tail -f /var/log/onedrive/<username>.onedrive.log
```
To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user.
#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
Initially, switch to the root user with `su - root`, then activate the systemd service:
```text
systemctl --user enable onedrive
systemctl --user start onedrive
```
**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below.
**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'.
To monitor the service's status, use the following:
```text
systemctl --user status onedrive.service
```
To observe the systemd application logs, use:
```text
journalctl --user-unit=onedrive -f
```
**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service:
```text
Failed to connect to bus: No such file or directory
```
The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login:
```text
export XDG_RUNTIME_DIR="/run/user/$UID"
export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus"
```
To apply this change, you must log out of all user accounts where it has been made.
**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows:
1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`.
2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`.
3. Start the service: `systemctl start onedrive@root.service`.
This ensures that the service correctly restarts upon system reboot.
To examine the systemd application logs, run:
```text
journalctl --unit=onedrive@<username> -f
```
#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)
```text
systemctl enable onedrive
systemctl start onedrive
```
**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'.
To view the systemd application logs, execute:
```text
journalctl --unit=onedrive -f
```
#### OneDrive service running as a non-root user via systemd (All Linux Distributions)
In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login.
1. As the user who will run the service, launch the application in standalone mode, authorise it for use, and verify that synchronisation is functioning as expected:
```text
onedrive --sync --verbose
```
2. After validating the application for your user, switch to the 'root' user, where <username> is your username from step 1 above.
```text
systemctl enable onedrive@<username>.service
systemctl start onedrive@<username>.service
```
3. To check the service's status for the user, use the following:
```text
systemctl status onedrive@<username>.service
```
To observe the systemd application logs, use:
```text
journalctl --unit=onedrive@<username> -f
```
#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)
In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps:
1. Log in via the graphical UI as the user you want to enable the service for.
2. Disable any `onedrive@` service files for your username, e.g.:
```text
sudo systemctl stop onedrive@alex.service
sudo systemctl disable onedrive@alex.service
```
3. Enable the service as follows:
```text
systemctl --user enable onedrive
systemctl --user start onedrive
```
To check the service's status for the user, use the following:
```text
systemctl --user status onedrive.service
```
To view the systemd application logs, execute:
```text
journalctl --user-unit=onedrive -f
```
**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms.
#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)
1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-<username>`
- where `<username>` is the `USER` targeted for the service
- e.g., `# mkdir /etc/sv/runsvdir-nolan`
2. Create a file called `run` under the previously created folder with executable permissions
- `# touch /etc/sv/runsvdir-<username>/run`
- `# chmod 0755 /etc/sv/runsvdir-<username>/run`
3. Edit the `run` file with the following contents (root permissions are required):
```sh
#!/bin/sh
export USER="<username>"
export HOME="/home/<username>"
groups="$(id -Gn "${USER}" | tr ' ' ':')"
svdir="${HOME}/service"
exec chpst -u "${USER}:${groups}" runsvdir "${svdir}"
```
- Ensure you replace `<username>` with the `USER` set in step #1.
4. Enable the previously created folder as a service
- `# ln -fs /etc/sv/runsvdir-<username> /var/service/`
5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks)
- `$ mkdir ~/service`
6. Create a subfolder specifically for OneDrive
- `$ mkdir ~/service/onedrive/`
7. Create a file called `run` under the previously created folder with executable permissions
- `$ touch ~/service/onedrive/run`
- `$ chmod 0755 ~/service/onedrive/run`
8. Append the following contents to the `run` file
```sh
#!/usr/bin/env sh
exec /usr/bin/onedrive --monitor
```
- In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`.
9. Reboot to apply the changes
10. Check the status of user-defined services
- `$ sv status ~/service/*`
For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html).
### How to start a user systemd service at boot without user login?
In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in.
To address this, reconfigure your 'user' account so that the systemd services you have created launch without requiring you to log in to your system:
```text
loginctl enable-linger <your_user_name>
```


@ -5,11 +5,13 @@
[![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml)
[![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive)
A free Microsoft OneDrive Client which supports OneDrive Personal, OneDrive for Business, OneDrive for Office365 and SharePoint.
Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries.
This powerful and highly configurable client can run on all major Linux distributions, FreeBSD, or as a Docker container. It supports one-way and two-way sync capabilities and securely connects to Microsoft OneDrive services.
This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services.
This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) client, which the developer has confirmed he has no desire to maintain or support the client ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). This fork has been in active development since mid 2018.
Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)).
This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018.
## Features
* State caching
@ -26,6 +28,7 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl
* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
* Supports single & multi-tenanted applications
* Supports rate limiting of traffic
* Supports multi-threaded uploads and downloads
## What's missing
* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive
@ -36,28 +39,17 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl
* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log)
* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray)
## Supported Application Version
Only the current application release version or greater is supported.
The current application release version is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases)
Check the version of the application you are using `onedrive --version` and ensure that you are running either the current release or compile the application yourself from master to get the latest version.
If you are not using the above application version or greater, you must upgrade your application to obtain support.
## Have a Question
If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
Be sure to review the Frequently Asked Questions as well before raising a new discussion post.
## Frequently Asked Questions
Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions)
## Reporting an Issue or Bug
If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
## Have a question
If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
1. Check the version of the application you are using `onedrive --version` and ensure that you are running a supported application version. If you are not using a supported application version, you must first upgrade your application to a supported version and then re-test for your issue.
2. If you are using a supported application version, fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
## Reporting an Issue or Bug
If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
1. Check the version of the application you are using `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master.
2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support)
* If you are in *any* way concerned regarding the sensitivity of the data contained within the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue
* If you are still concerned, provide an NDA or confidentiality document to sign
@ -70,23 +62,23 @@ Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/maste
## Documentation and Configuration Assistance
### Installing from Distribution Packages or Building the OneDrive Client for Linux from source
Refer to [docs/INSTALL.md](https://github.com/abraunegg/onedrive/blob/master/docs/INSTALL.md)
Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md)
### Configuration and Usage
Refer to [docs/USAGE.md](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md)
Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md)
### Configure OneDrive Business Shared Folders
Refer to [docs/BusinessSharedFolders.md](https://github.com/abraunegg/onedrive/blob/master/docs/BusinessSharedFolders.md)
Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md)
### Configure SharePoint / Office 365 Shared Libraries (Business or Education)
Refer to [docs/SharePoint-Shared-Libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/SharePoint-Shared-Libraries.md)
Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md)
### Configure National Cloud support
Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md)
### Docker support
Refer to [docs/Docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/Docker.md)
Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md)
### Podman support
Refer to [docs/Podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/Podman.md)
Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md)

src/clientSideFiltering.d (new file, 384 lines)

@ -0,0 +1,384 @@
// What is this module called?
module clientSideFiltering;
// What does this module require to function?
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
class ClientSideFiltering {
// Class variables
ApplicationConfig appConfig;
string[] paths;
string[] businessSharedItemsList;
Regex!char fileMask;
Regex!char directoryMask;
bool skipDirStrictMatch = false;
bool skipDotfiles = false;
this(ApplicationConfig appConfig) {
// Configure the class variable to consume the application configuration
this.appConfig = appConfig;
}
// Initialise the required items
bool initialise() {
log.vdebug("Configuring Client Side Filtering (Selective Sync)");
// Load the sync_list file if it exists
if (exists(appConfig.syncListFilePath)){
loadSyncList(appConfig.syncListFilePath);
}
// Load the Business Shared Items file if it exists
if (exists(appConfig.businessSharedItemsFilePath)){
loadBusinessSharedItems(appConfig.businessSharedItemsFilePath);
}
// Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries
// Handle skip_dir configuration in config file
log.vdebug("Configuring skip_dir ...");
log.vdebug("skip_dir: ", appConfig.getValueString("skip_dir"));
setDirMask(appConfig.getValueString("skip_dir"));
// Was --skip-dir-strict-match configured?
log.vdebug("Configuring skip_dir_strict_match ...");
log.vdebug("skip_dir_strict_match: ", appConfig.getValueBool("skip_dir_strict_match"));
if (appConfig.getValueBool("skip_dir_strict_match")) {
setSkipDirStrictMatch();
}
// Was --skip-dot-files configured?
log.vdebug("Configuring skip_dotfiles ...");
log.vdebug("skip_dotfiles: ", appConfig.getValueBool("skip_dotfiles"));
if (appConfig.getValueBool("skip_dotfiles")) {
setSkipDotfiles();
}
// Handle skip_file configuration in config file
log.vdebug("Configuring skip_file ...");
// Validate skip_file to ensure that this does not contain an invalid configuration
// Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process.
foreach(entry; appConfig.getValueString("skip_file").split("|")){
if (entry == ".*") {
// invalid entry element detected
log.error("ERROR: Invalid skip_file entry '.*' detected");
return false;
}
}
// All skip_file entries are valid
log.vdebug("skip_file: ", appConfig.getValueString("skip_file"));
setFileMask(appConfig.getValueString("skip_file"));
// All configured OK
return true;
}
// Load sync_list file if it exists
void loadSyncList(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
// load business_shared_folders file
void loadBusinessSharedItems(string filepath) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedItemsList ~= buildNormalizedPath(line);
}
file.close();
}
// Configure the regex that will be used for 'skip_file'
void setFileMask(const(char)[] mask) {
fileMask = wild2regex(mask);
log.vdebug("Selective Sync File Mask: ", fileMask);
}
// Configure the regex that will be used for 'skip_dir'
void setDirMask(const(char)[] dirmask) {
directoryMask = wild2regex(dirmask);
log.vdebug("Selective Sync Directory Mask: ", directoryMask);
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch() {
skipDirStrictMatch = true;
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles() {
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles() {
return skipDotfiles;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path) {
// Perform a 'sync_list' inclusion / exclusion test
return isPathExcluded(path, paths);
}
// config file skip_dir parameter
bool isDirNameExcluded(string name) {
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(directoryMask).empty) {
log.vdebug("'!name.matchFirst(directoryMask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(directoryMask).empty) {
log.vdebug("'!checkPath.matchFirst(directoryMask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name) {
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(fileMask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(fileMask).empty) {
return true;
}
}
// no match
return false;
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths) {
// function variables
bool exclude = false;
bool excludeDirectMatch = false; // will be set to true if a sync_list entry directly excludes this path
bool excludeMatched = false; // will be set to true if a sync_list exclusion pattern matches this path
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless the path is an exact match, all sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag excludeDirectMatch so that a 'wildcard match' will not override this exclude
excludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem / sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' with '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !excludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (excludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
}

File diff suppressed because it is too large

src/curlEngine.d (new file, 111 lines)

@ -0,0 +1,111 @@
// What is this module called?
module curlEngine;
// What does this module require to function?
import std.net.curl;
import etc.c.curl: CurlOption;
import std.datetime;
// What other modules that we have created do we need to import?
import log;
import std.stdio;
class CurlEngine {
HTTP http;
this() {
http = HTTP();
}
void initialise(long dnsTimeout, long connectTimeout, long dataTimeout, long operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, long userRateLimit, long protocolVersion) {
// Curl Timeout Handling
// libcurl dns_cache_timeout timeout
// https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html
// https://dlang.org/library/std/net/curl/http.dns_timeout.html
http.dnsTimeout = (dur!"seconds"(dnsTimeout));
// Timeout for HTTPS connections
// https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html
// https://dlang.org/library/std/net/curl/http.connect_timeout.html
http.connectTimeout = (dur!"seconds"(connectTimeout));
// Timeout for activity on connection
// This is a DMD | DLANG specific item, not a libcurl item
// https://dlang.org/library/std/net/curl/http.data_timeout.html
// https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2);
http.dataTimeout = (dur!"seconds"(dataTimeout));
// Maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
// https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html
// https://dlang.org/library/std/net/curl/http.operation_timeout.html
http.operationTimeout = (dur!"seconds"(operationTimeout));
// Specify how many redirects should be allowed
http.maxRedirects(maxRedirects);
// Debug HTTPS
http.verbose = httpsDebug;
// Use the configured 'user_agent' value
http.setUserAgent = userAgent;
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// What version of HTTP protocol do we use?
// Curl >= 7.62.0 defaults to http2 for a significant number of operations
if (httpProtocol) {
// Downgrade to HTTP/1.1 - a CURLOPT_HTTP_VERSION value of 2 selects HTTP/1.1
http.handle.set(CurlOption.http_version,2);
}
// Configure upload / download rate limits if configured
// 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts
// A 0 value means rate is unlimited, and is the curl default
if (userRateLimit > 0) {
// set rate limit
http.handle.set(CurlOption.max_send_speed_large,userRateLimit);
http.handle.set(CurlOption.max_recv_speed_large,userRateLimit);
}
// Explicitly set these libcurl options
// https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html
// Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals
http.handle.set(CurlOption.nosignal,0);
// https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html
// Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled
http.handle.set(CurlOption.tcp_nodelay,0);
// https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html
// CURLOPT_FORBID_REUSE - make connection get closed at once after use
// Ensure that we ARE NOT reusing TCP socket connections - setting this to 0 ensures that we ARE reusing connections (as was done in v2.4.xx) so that connections remain open and usable
// Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running
// multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly
// The libcurl default is 1 - ensure we are configuring not to reuse connections and leave unused sockets open
http.handle.set(CurlOption.forbid_reuse,1);
if (httpsDebug) {
// Output what options we are using so that in the debug log this can be tracked
log.vdebug("http.dnsTimeout = ", dnsTimeout);
log.vdebug("http.connectTimeout = ", connectTimeout);
log.vdebug("http.dataTimeout = ", dataTimeout);
log.vdebug("http.operationTimeout = ", operationTimeout);
log.vdebug("http.maxRedirects = ", maxRedirects);
log.vdebug("http.CurlOption.ipresolve = ", protocolVersion);
}
}
void setMethodPost() {
http.method = HTTP.Method.post;
}
void setMethodPatch() {
http.method = HTTP.Method.patch;
}
void setDisableSSLVerifyPeer() {
log.vdebug("Switching off CurlOption.ssl_verifypeer");
http.handle.set(CurlOption.ssl_verifypeer, 0);
}
}

src/itemdb.d

@ -1,3 +1,7 @@
// What is this module called?
module itemdb;
// What does this module require to function?
import std.datetime;
import std.exception;
import std.path;
@ -5,19 +9,26 @@ import std.string;
import std.stdio;
import std.algorithm.searching;
import core.stdc.stdlib;
import std.json;
import std.conv;
// What other modules that we have created do we need to import?
import sqlite;
static import log;
import util;
import log;
enum ItemType {
file,
dir,
remote
remote,
unknown
}
struct Item {
string driveId;
string id;
string name;
string remoteName;
ItemType type;
string eTag;
string cTag;
@ -28,23 +39,144 @@ struct Item {
string remoteDriveId;
string remoteId;
string syncStatus;
string size;
}
final class ItemDatabase
{
// Construct an Item struct from a JSON driveItem
Item makeDatabaseItem(JSONValue driveItem) {
Item item = {
id: driveItem["id"].str,
name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business
eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business
cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business)
remoteName: "actualOnlineName" in driveItem ? driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders
};
// OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834
// OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive
if(isItemDeleted(driveItem)) {
// Set mtime to SysTime(0)
item.mtime = SysTime(0);
} else {
// Item is not in a deleted state
// Resolve 'Key not found: fileSystemInfo' when the item is a remote item
// https://github.com/abraunegg/onedrive/issues/11
if (isItemRemote(driveItem)) {
// remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default
// Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI
// to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to an application crash
// See: https://github.com/abraunegg/onedrive/issues/1533
if ("fileSystemInfo" in driveItem["remoteItem"]) {
// 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases
item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
} else {
// is a remote item, but 'fileSystemInfo' is missing from 'remoteItem'
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
} else {
// Does fileSystemInfo exist at all ?
if ("fileSystemInfo" in driveItem) {
item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str);
}
}
}
// Set this item object type
bool typeSet = false;
if (isItemFile(driveItem)) {
// 'file' object exists in the JSON
log.vdebug("Flagging object as a file");
typeSet = true;
item.type = ItemType.file;
}
if (isItemFolder(driveItem)) {
// 'folder' object exists in the JSON
log.vdebug("Flagging object as a directory");
typeSet = true;
item.type = ItemType.dir;
}
if (isItemRemote(driveItem)) {
// 'remote' object exists in the JSON
log.vdebug("Flagging object as a remote");
typeSet = true;
item.type = ItemType.remote;
}
// root and remote items do not have parentReference
if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) {
item.driveId = driveItem["parentReference"]["driveId"].str;
if (hasParentReferenceId(driveItem)) {
item.parentId = driveItem["parentReference"]["id"].str;
}
}
// extract the file hash and file size
if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) {
// Get file size
if (hasFileSize(driveItem)) {
item.size = to!string(driveItem["size"].integer);
// Get quickXorHash as default
if ("quickXorHash" in driveItem["file"]["hashes"]) {
item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str;
} else {
log.vdebug("quickXorHash is missing from ", driveItem["id"].str);
}
// If quickXorHash is empty ..
if (item.quickXorHash.empty) {
// Is there a sha256Hash?
if ("sha256Hash" in driveItem["file"]["hashes"]) {
item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str;
} else {
log.vdebug("sha256Hash is missing from ", driveItem["id"].str);
}
}
} else {
// So that we have at least a zero value here as the API provided no 'size' data for this file item
item.size = "0";
}
}
// Is the object a remote drive item - living on another driveId ?
if (isItemRemote(driveItem)) {
item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str;
item.remoteId = driveItem["remoteItem"]["id"].str;
}
// We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not:
// - National Cloud Deployments do not support /delta as a query
// - When using --single-directory
// - When using --download-only --cleanup-local-files
//
// Thus we need to track in the database that this item is in sync
// As we are making an item, set the syncStatus to Y
// ONLY when one of the three modes above is in use will all the existing DB entries get set to N
// so when processing /children, it can be identified what the 'deleted' difference is
item.syncStatus = "Y";
// Return the created item
return item;
}
final class ItemDatabase {
// increment this for every change in the db schema
immutable int itemDatabaseVersion = 11;
immutable int itemDatabaseVersion = 12;
Database db;
string insertItemStmt;
string updateItemStmt;
string selectItemByIdStmt;
string selectItemByRemoteIdStmt;
string selectItemByParentIdStmt;
string deleteItemByIdStmt;
bool databaseInitialised = false;
this(const(char)[] filename)
{
this(const(char)[] filename) {
db = Database(filename);
int dbVersion;
try {
@ -99,12 +231,12 @@ final class ItemDatabase
db.exec("PRAGMA locking_mode = EXCLUSIVE");
insertItemStmt = "
INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)
INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)
";
updateItemStmt = "
UPDATE item
SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13
SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteId = ?13, syncStatus = ?14, size = ?15
WHERE driveId = ?1 AND id = ?2
";
selectItemByIdStmt = "
@ -112,6 +244,11 @@ final class ItemDatabase
FROM item
WHERE driveId = ?1 AND id = ?2
";
selectItemByRemoteIdStmt = "
SELECT *
FROM item
WHERE remoteDriveId = ?1 AND remoteId = ?2
";
selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?";
deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?";
@ -119,17 +256,16 @@ final class ItemDatabase
databaseInitialised = true;
}
bool isDatabaseInitialised()
{
bool isDatabaseInitialised() {
return databaseInitialised;
}
void createTable()
{
void createTable() {
db.exec("CREATE TABLE item (
driveId TEXT NOT NULL,
id TEXT NOT NULL,
name TEXT NOT NULL,
remoteName TEXT,
type TEXT NOT NULL,
eTag TEXT,
cTag TEXT,
@ -141,6 +277,7 @@ final class ItemDatabase
remoteId TEXT,
deltaLink TEXT,
syncStatus TEXT,
size TEXT,
PRIMARY KEY (driveId, id),
FOREIGN KEY (driveId, parentId)
REFERENCES item (driveId, id)
@ -154,32 +291,27 @@ final class ItemDatabase
db.setVersion(itemDatabaseVersion);
}
void insert(const ref Item item)
{
void insert(const ref Item item) {
auto p = db.prepare(insertItemStmt);
bindItem(item, p);
p.exec();
}
void update(const ref Item item)
{
void update(const ref Item item) {
auto p = db.prepare(updateItemStmt);
bindItem(item, p);
p.exec();
}
void dump_open_statements()
{
void dump_open_statements() {
db.dump_open_statements();
}
int db_checkpoint()
{
int db_checkpoint() {
return db.db_checkpoint();
}
void upsert(const ref Item item)
{
void upsert(const ref Item item) {
auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? AND id = ?");
s.bind(1, item.driveId);
s.bind(2, item.id);
@ -191,8 +323,7 @@ final class ItemDatabase
stmt.exec();
}
Item[] selectChildren(const(char)[] driveId, const(char)[] id)
{
Item[] selectChildren(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(selectItemByParentIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -205,8 +336,7 @@ final class ItemDatabase
return items;
}
bool selectById(const(char)[] driveId, const(char)[] id, out Item item)
{
bool selectById(const(char)[] driveId, const(char)[] id, out Item item) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -218,9 +348,20 @@ final class ItemDatabase
return false;
}
bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) {
auto p = db.prepare(selectItemByRemoteIdStmt);
p.bind(1, remoteDriveId);
p.bind(2, remoteId);
auto r = p.exec();
if (!r.empty) {
item = buildItem(r);
return true;
}
return false;
}
// returns true if an item id is in the database
bool idInLocalDatabase(const(string) driveId, const(string)id)
{
bool idInLocalDatabase(const(string) driveId, const(string)id) {
auto p = db.prepare(selectItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
@ -233,8 +374,7 @@ final class ItemDatabase
// returns the item with the given path
// the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3"
bool selectByPath(const(char)[] path, string rootDriveId, out Item item)
{
bool selectByPath(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
@ -254,6 +394,7 @@ final class ItemDatabase
auto r = s.exec();
if (r.empty) return false;
currItem = buildItem(r);
// if the item is of type remote substitute it with the child
if (currItem.type == ItemType.remote) {
Item child;
@ -268,8 +409,7 @@ final class ItemDatabase
}
// same as selectByPath() but it does not traverse remote folders
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item)
{
bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) {
Item currItem = { driveId: rootDriveId };
// Issue https://github.com/abraunegg/onedrive/issues/578
@ -294,58 +434,80 @@ final class ItemDatabase
return true;
}
void deleteById(const(char)[] driveId, const(char)[] id)
{
void deleteById(const(char)[] driveId, const(char)[] id) {
auto p = db.prepare(deleteItemByIdStmt);
p.bind(1, driveId);
p.bind(2, id);
p.exec();
}
private void bindItem(const ref Item item, ref Statement stmt)
{
private void bindItem(const ref Item item, ref Statement stmt) {
with (stmt) with (item) {
bind(1, driveId);
bind(2, id);
bind(3, name);
bind(4, remoteName);
string typeStr = null;
final switch (type) with (ItemType) {
case file: typeStr = "file"; break;
case dir: typeStr = "dir"; break;
case remote: typeStr = "remote"; break;
case unknown: typeStr = "unknown"; break;
}
bind(4, typeStr);
bind(5, eTag);
bind(6, cTag);
bind(7, mtime.toISOExtString());
bind(8, parentId);
bind(9, quickXorHash);
bind(10, sha256Hash);
bind(11, remoteDriveId);
bind(12, remoteId);
bind(13, syncStatus);
bind(5, typeStr);
bind(6, eTag);
bind(7, cTag);
bind(8, mtime.toISOExtString());
bind(9, parentId);
bind(10, quickXorHash);
bind(11, sha256Hash);
bind(12, remoteDriveId);
bind(13, remoteId);
bind(14, syncStatus);
bind(15, size);
}
}
private Item buildItem(Statement.Result result)
{
private Item buildItem(Statement.Result result) {
assert(!result.empty, "The result must not be empty");
assert(result.front.length == 14, "The result must have 14 columns");
assert(result.front.length == 16, "The result must have 16 columns");
Item item = {
// column 0: driveId
// column 1: id
// column 2: name
// column 3: remoteName - only used when there is a difference in the local name & remote shared folder name
// column 4: type
// column 5: eTag
// column 6: cTag
// column 7: mtime
// column 8: parentId
// column 9: quickXorHash
// column 10: sha256Hash
// column 11: remoteDriveId
// column 12: remoteId
// column 13: deltaLink
// column 14: syncStatus
// column 15: size
driveId: result.front[0].dup,
id: result.front[1].dup,
name: result.front[2].dup,
eTag: result.front[4].dup,
cTag: result.front[5].dup,
mtime: SysTime.fromISOExtString(result.front[6]),
parentId: result.front[7].dup,
quickXorHash: result.front[8].dup,
sha256Hash: result.front[9].dup,
remoteDriveId: result.front[10].dup,
remoteId: result.front[11].dup,
syncStatus: result.front[12].dup
remoteName: result.front[3].dup,
// Column 4 is type - not set here
eTag: result.front[5].dup,
cTag: result.front[6].dup,
mtime: SysTime.fromISOExtString(result.front[7]),
parentId: result.front[8].dup,
quickXorHash: result.front[9].dup,
sha256Hash: result.front[10].dup,
remoteDriveId: result.front[11].dup,
remoteId: result.front[12].dup,
// Column 13 is deltaLink - not set here
syncStatus: result.front[14].dup,
size: result.front[15].dup
};
switch (result.front[3]) {
switch (result.front[4]) {
case "file": item.type = ItemType.file; break;
case "dir": item.type = ItemType.dir; break;
case "remote": item.type = ItemType.remote; break;
@ -357,8 +519,7 @@ final class ItemDatabase
// computes the path of the given item id
// the path is relative to the sync directory ex: "Music/Turbo Killer.mp3"
// the trailing slash is not added even if the item is a directory
string computePath(const(char)[] driveId, const(char)[] id)
{
string computePath(const(char)[] driveId, const(char)[] id) {
assert(driveId && id);
string path;
Item item;
@ -416,8 +577,7 @@ final class ItemDatabase
return path;
}
Item[] selectRemoteItems()
{
Item[] selectRemoteItems() {
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL");
auto res = stmt.exec();
@ -428,8 +588,11 @@ final class ItemDatabase
return items;
}
string getDeltaLink(const(char)[] driveId, const(char)[] id)
{
string getDeltaLink(const(char)[] driveId, const(char)[] id) {
// Log what we received
log.vdebug("DeltaLink Query (driveId): ", driveId);
log.vdebug("DeltaLink Query (id): ", id);
assert(driveId && id);
auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -439,8 +602,7 @@ final class ItemDatabase
return res.front[0].dup;
}
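A hedged round-trip sketch of the two delta link helpers (itemDB, driveId, id and latestDeltaLink are illustrative names; assumes the driveId/id pair already exists in the item table):
// Hypothetical usage, not part of the patch
itemDB.setDeltaLink(driveId, id, latestDeltaLink);
assert(itemDB.getDeltaLink(driveId, id) == latestDeltaLink);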
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink)
{
void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) {
assert(driveId && id);
assert(deltaLink);
auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2");
@ -455,8 +617,7 @@ final class ItemDatabase
// As we query /children to get all children from OneDrive, update anything in the database
// to be flagged as not-in-sync, so that we can use that flag to determine what was previously
// in-sync, but now deleted on OneDrive
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id)
{
void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) {
assert(driveId);
auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2");
stmt.bind(1, driveId);
@ -466,8 +627,7 @@ final class ItemDatabase
// National Cloud Deployments (US and DE) do not support /delta as a query
// Select items that have an out-of-sync flag set
Item[] selectOutOfSyncItems(const(char)[] driveId)
{
Item[] selectOutOfSyncItems(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1");
@ -482,8 +642,7 @@ final class ItemDatabase
// OneDrive Business Folders are stored in the database potentially without a root | parentRoot link
// Select items associated with the provided driveId
Item[] selectByDriveId(const(char)[] driveId)
{
Item[] selectByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL");
@ -496,9 +655,23 @@ final class ItemDatabase
return items;
}
// Select all items associated with the provided driveId
Item[] selectAllItemsByDriveId(const(char)[] driveId) {
assert(driveId);
Item[] items;
auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1");
stmt.bind(1, driveId);
auto res = stmt.exec();
while (!res.empty) {
items ~= buildItem(res);
res.step();
}
return items;
}
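A short usage sketch for the new selector (itemDB and driveId are illustrative names for an initialised ItemDatabase and a known drive id):
// Hypothetical usage, not part of the patch
Item[] allDriveItems = itemDB.selectAllItemsByDriveId(driveId);
log.vdebug("Items in database for this driveId: ", allDriveItems.length);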
// Perform a vacuum on the database, commit WAL / SHM to file
void performVacuum()
{
void performVacuum() {
log.vdebug("Attempting to perform a database vacuum to merge any temporary data");
try {
auto stmt = db.prepare("VACUUM;");
stmt.exec();
@ -510,8 +683,7 @@ final class ItemDatabase
}
// Select distinct driveId items from database
string[] selectDistinctDriveIds()
{
string[] selectDistinctDriveIds() {
string[] driveIdArray;
auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;");
auto res = stmt.exec();
@ -522,4 +694,4 @@ final class ItemDatabase
}
return driveIdArray;
}
}
}

137 src/log.d

@ -1,31 +1,47 @@
// What is this module called?
module log;
// What does this module require to function?
import std.stdio;
import std.file;
import std.datetime;
import std.process;
import std.conv;
import std.path;
import std.string;
import core.memory;
import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen;
import core.sys.posix.pwd;
import core.sys.posix.unistd;
import core.stdc.string : strlen;
import std.algorithm : splitter;
version(Notifications) {
import dnotify;
}
// enable verbose logging
// module variables
// verbose logging count
long verbose;
// do we write a log file? ... this should be a config value
bool writeLogFile = false;
// did the log file write fail?
bool logFileWriteFailFlag = false;
private bool doNotifications;
private bool triggerNotification;
// shared string variable for username
string username;
string logFilePath;
string logFileName;
string logFileFullPath;
void init(string logDir)
{
void initialise(string logDir) {
writeLogFile = true;
// Configure various variables
username = getUserName();
logFilePath = logDir;
logFileName = username ~ ".onedrive.log";
logFileFullPath = buildPath(logFilePath, logFileName);
if (!exists(logFilePath)){
// logfile path does not exist
@ -34,15 +50,19 @@ void init(string logDir)
}
catch (std.file.FileException e) {
// we got an error ..
writeln("\nUnable to access ", logFilePath);
writeln("Please manually create '",logFilePath, "' and set appropriate permissions to allow write access");
writeln("The requested client activity log will instead be located in your users home directory");
writeln();
writeln("ERROR: Unable to access ", logFilePath);
writeln("ERROR: Please manually create '",logFilePath, "' and set appropriate permissions to allow write access");
writeln("ERROR: The requested client activity log will instead be located in your users home directory");
writeln();
// set the flag so we don't keep printing this sort of message
logFileWriteFailFlag = true;
}
}
}
void setNotifications(bool value)
{
void enableNotifications(bool value) {
version(Notifications) {
// if we try to enable notifications, check for server availability
// and disable in case dbus server is not reachable
@ -54,11 +74,10 @@ void setNotifications(bool value)
}
}
}
doNotifications = value;
triggerNotification = value;
}
void log(T...)(T args)
{
void log(T...)(T args) {
writeln(args);
if(writeLogFile){
// Write to log file
@ -66,22 +85,19 @@ void log(T...)(T args)
}
}
void logAndNotify(T...)(T args)
{
void logAndNotify(T...)(T args) {
notify(args);
log(args);
}
void fileOnly(T...)(T args)
{
void fileOnly(T...)(T args) {
if(writeLogFile){
// Write to log file
logfileWriteLine(args);
}
}
void vlog(T...)(T args)
{
void vlog(T...)(T args) {
if (verbose >= 1) {
writeln(args);
if(writeLogFile){
@ -91,8 +107,7 @@ void vlog(T...)(T args)
}
}
void vdebug(T...)(T args)
{
void vdebug(T...)(T args) {
if (verbose >= 2) {
writeln("[DEBUG] ", args);
if(writeLogFile){
@ -102,8 +117,7 @@ void vdebug(T...)(T args)
}
}
void vdebugNewLine(T...)(T args)
{
void vdebugNewLine(T...)(T args) {
if (verbose >= 2) {
writeln("\n[DEBUG] ", args);
if(writeLogFile){
@ -113,8 +127,7 @@ void vdebugNewLine(T...)(T args)
}
}
void error(T...)(T args)
{
void error(T...)(T args) {
stderr.writeln(args);
if(writeLogFile){
// Write to log file
@ -122,16 +135,14 @@ void error(T...)(T args)
}
}
void errorAndNotify(T...)(T args)
{
void errorAndNotify(T...)(T args) {
notify(args);
error(args);
}
void notify(T...)(T args)
{
void notify(T...)(T args) {
version(Notifications) {
if (doNotifications) {
if (triggerNotification) {
string result;
foreach (index, arg; args) {
result ~= to!string(arg);
@ -153,45 +164,44 @@ void notify(T...)(T args)
}
}
private void logfileWriteLine(T...)(T args)
{
private void logfileWriteLine(T...)(T args) {
static import std.exception;
// Write to log file
string logFileName = .logFilePath ~ .username ~ ".onedrive.log";
auto currentTime = Clock.currTime();
auto timeString = currentTime.toString();
auto timeString = leftJustify(currentTime.toString(), 28, '0');
File logFile;
// Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied)
try {
logFile = File(logFileName, "a");
logFile = File(logFileFullPath, "a");
}
catch (std.exception.ErrnoException e) {
// We cannot open the log file in logFilePath location for writing
// We cannot open the log file logFileFullPath for writing
// The user is not part of the standard 'users' group (GID 100)
// Change logfile to ~/onedrive.log, putting the log file in the user's home directory
if (!logFileWriteFailFlag) {
// write out an error message that we can't log to the requested file
writeln("\nUnable to write activity log to ", logFileName);
writeln("Please set appropriate permissions to allow write access to the logging directory for your user account");
writeln("The requested client activity log will instead be located in your users home directory\n");
writeln();
writeln("ERROR: Unable to write activity log to ", logFileFullPath);
writeln("ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account");
writeln("ERROR: The requested client activity log will instead be located in your users home directory");
writeln();
// set the flag so we don't keep printing this error message
logFileWriteFailFlag = true;
}
string homePath = environment.get("HOME");
string logFileNameAlternate = homePath ~ "/onedrive.log";
logFile = File(logFileNameAlternate, "a");
string logFileFullPathAlternate = homePath ~ "/onedrive.log";
logFile = File(logFileFullPathAlternate, "a");
}
// Write to the log file
logFile.writeln(timeString, "\t", args);
logFile.close();
}
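The leftJustify() call above exists because SysTime.toString() emits a variable number of fractional-second digits, which makes the tab-separated log columns jitter; padding every timestamp to a fixed 28 characters keeps them aligned. A minimal sketch (timestamps illustrative):
// Illustrative only: both timestamps pad out to the same 28-character width
assert(leftJustify("2023-Oct-18 19:15:27.12", 28, '0').length == 28);
assert(leftJustify("2023-Oct-18 19:15:27.1234567", 28, '0').length == 28);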
private string getUserName()
{
private string getUserName() {
auto pw = getpwuid(getuid);
// get required details
@ -216,24 +226,27 @@ private string getUserName()
}
}
void displayMemoryUsagePreGC()
{
// Display memory usage
writeln("\nMemory Usage pre GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
void displayMemoryUsagePreGC() {
// Display memory usage
writeln();
writeln("Memory Usage pre GC (KB)");
writeln("------------------------");
writeMemoryStats();
writeln();
}
void displayMemoryUsagePostGC()
{
// Display memory usage
writeln("\nMemory Usage post GC (bytes)");
writeln("--------------------");
writeln("memory usedSize = ", GC.stats.usedSize);
writeln("memory freeSize = ", GC.stats.freeSize);
// uncomment this if required, if not using LDC 1.16 as this does not exist in that version
//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n");
void displayMemoryUsagePostGC() {
// Display memory usage
writeln();
writeln("Memory Usage post GC (KB)");
writeln("-------------------------");
writeMemoryStats();
writeln();
}
void writeMemoryStats() {
// write memory stats
writeln("memory usedSize = ", (GC.stats.usedSize/1024));
writeln("memory freeSize = ", (GC.stats.freeSize/1024));
writeln("memory allocatedInCurrentThread = ", (GC.stats.allocatedInCurrentThread/1024));
}
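As a worked example of the unit change above: GC.stats reports bytes, so a usedSize of 5,242,880 bytes is printed as 5120 under the new KB headings.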

2696 src/main.d

File diff suppressed because it is too large


@ -1,27 +1,49 @@
import core.sys.linux.sys.inotify;
import core.stdc.errno;
import core.sys.posix.poll, core.sys.posix.unistd;
import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm;
import core.stdc.stdlib;
import config;
import selective;
import util;
static import log;
// What is this module called?
module monitor;
// relevant inotify events
// What does this module require to function?
import core.stdc.errno;
import core.stdc.stdlib;
import core.sys.linux.sys.inotify;
import core.sys.posix.poll;
import core.sys.posix.unistd;
import std.algorithm;
import std.exception;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import std.conv;
// What other modules that we have created do we need to import?
import config;
import util;
import log;
import clientSideFiltering;
// Relevant inotify events
private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW;
class MonitorException: ErrnoException
{
@safe this(string msg, string file = __FILE__, size_t line = __LINE__)
{
class MonitorException: ErrnoException {
@safe this(string msg, string file = __FILE__, size_t line = __LINE__) {
super(msg, file, line);
}
}
final class Monitor
{
bool verbose;
final class Monitor {
// Class variables
ApplicationConfig appConfig;
ClientSideFiltering selectiveSync;
// Are we verbose in logging output
bool verbose = false;
// skip symbolic links
bool skip_symlinks = false;
// check for .nosync if enabled
bool check_nosync = false;
// Configure Private Class Variables
// inotify file descriptor
private int fd;
// map every inotify watch descriptor to its directory
@ -30,29 +52,27 @@ final class Monitor
private string[int] cookieToPath;
// buffer to receive the inotify events
private void[] buffer;
// skip symbolic links
bool skip_symlinks;
// check for .nosync if enabled
bool check_nosync;
private SelectiveSync selectiveSync;
// Configure function delegates
void delegate(string path) onDirCreated;
void delegate(string path) onFileChanged;
void delegate(string path) onDelete;
void delegate(string from, string to) onMove;
this(SelectiveSync selectiveSync)
{
assert(selectiveSync);
// Configure the class variable to consume the application configuration, including selective sync
this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) {
this.appConfig = appConfig;
this.selectiveSync = selectiveSync;
}
void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync)
{
this.verbose = verbose;
this.skip_symlinks = skip_symlinks;
this.check_nosync = check_nosync;
// Initialise the monitor class
void initialise() {
// Configure the variables
skip_symlinks = appConfig.getValueBool("skip_symlinks");
check_nosync = appConfig.getValueBool("check_nosync");
if (appConfig.getValueLong("verbose") > 0) {
verbose = true;
}
assert(onDirCreated && onFileChanged && onDelete && onMove);
fd = inotify_init();
@ -61,9 +81,9 @@ final class Monitor
// from which point do we start watching for changes?
string monitorPath;
if (cfg.getValueString("single_directory") != ""){
// single directory in use, monitor only this
monitorPath = "./" ~ cfg.getValueString("single_directory");
if (appConfig.getValueString("single_directory") != ""){
// single directory in use, monitor only this path
monitorPath = "./" ~ appConfig.getValueString("single_directory");
} else {
// default
monitorPath = ".";
@ -71,14 +91,14 @@ final class Monitor
addRecursive(monitorPath);
}
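Because initialise() asserts that all four callbacks are wired before the inotify descriptor is created, callers are expected to assign the delegates first. A minimal wiring sketch (handler bodies and the fsMonitor variable name are illustrative, not part of the patch):
// Hypothetical wiring, not part of the patch
auto fsMonitor = new Monitor(appConfig, selectiveSync);
fsMonitor.onDirCreated = (string path) { log.vlog("Directory created: ", path); };
fsMonitor.onFileChanged = (string path) { log.vlog("File changed: ", path); };
fsMonitor.onDelete = (string path) { log.vlog("Item deleted: ", path); };
fsMonitor.onMove = (string from, string to) { log.vlog("Item moved: ", from, " -> ", to); };
fsMonitor.initialise();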
void shutdown()
{
// Shutdown the monitor class
void shutdown() {
if (fd > 0) close(fd);
wdToDirName = null;
}
private void addRecursive(string dirname)
{
// Recursively add this path to be monitored
private void addRecursive(string dirname) {
// skip non existing/disappeared items
if (!exists(dirname)) {
log.vlog("Not adding non-existing/disappeared directory: ", dirname);
@ -173,19 +193,21 @@ final class Monitor
}
}
private void add(string pathname)
{
// Add this path to be monitored
private void add(string pathname) {
int wd = inotify_add_watch(fd, toStringz(pathname), mask);
if (wd < 0) {
if (errno() == ENOSPC) {
// Get the current value
ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches")));
log.log("The user limit on the total number of inotify watches has been reached.");
log.log("To see the current max number of watches run:");
log.log("sysctl fs.inotify.max_user_watches");
log.log("To change the current max number of watches to 524288 run:");
log.log("sudo sysctl fs.inotify.max_user_watches=524288");
log.log("Your current limit of inotify watches is: ", maxInotifyWatches);
log.log("It is recommended that you change the max number of inotify watches to at least double your existing value.");
log.log("To change the current max number of watches to " , (maxInotifyWatches * 2) , " run:");
log.log("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=", (maxInotifyWatches * 2));
}
if (errno() == 13) {
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we could not add a watch due to permission denied
return;
} else {
@ -206,18 +228,17 @@ final class Monitor
if (isDir(pathname)) {
// This is a directory
// is the path excluded if skip_dotfiles is configured and the path is a .folder?
if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
// no misleading output that we are monitoring this directory
return;
}
// Log that this directory is being monitored
log.vlog("Monitor directory: ", pathname);
log.vlog("Monitoring directory: ", pathname);
}
}
// remove a watch descriptor
private void remove(int wd)
{
// Remove a watch descriptor
private void remove(int wd) {
assert(wd in wdToDirName);
int ret = inotify_rm_watch(fd, wd);
if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
@ -225,9 +246,8 @@ final class Monitor
wdToDirName.remove(wd);
}
// remove the watch descriptors associated to the given path
private void remove(const(char)[] path)
{
// Remove the watch descriptors associated to the given path
private void remove(const(char)[] path) {
path ~= "/";
foreach (wd, dirname; wdToDirName) {
if (dirname.startsWith(path)) {
@ -239,17 +259,17 @@ final class Monitor
}
}
// return the file path from an inotify event
private string getPath(const(inotify_event)* event)
{
// Return the file path from an inotify event
private string getPath(const(inotify_event)* event) {
string path = wdToDirName[event.wd];
if (event.len > 0) path ~= fromStringz(event.name.ptr);
log.vdebug("inotify path event for: ", path);
return path;
}
void update(bool useCallbacks = true)
{
// Process pending inotify events
void update(bool useCallbacks = true) {
pollfd fds = {
fd: fd,
events: POLLIN
@ -386,6 +406,8 @@ final class Monitor
remove(path);
cookieToPath.remove(cookie);
}
// Debug Log that all inotify events are flushed
log.vdebug("inotify events flushed");
}
}
}

File diff suppressed because it is too large


@ -1,5 +1,7 @@
// What is this module called?
module progress;
// What does this module require to function?
import std.stdio;
import std.range;
import std.format;
@ -7,6 +9,8 @@ import std.datetime;
import core.sys.posix.unistd;
import core.sys.posix.sys.ioctl;
// What other modules that we have created do we need to import?
class Progress
{
private:


@ -1,7 +1,11 @@
// What is this module called?
module qxor;
// What does this module require to function?
import std.algorithm;
import std.digest;
// implementation of the QuickXorHash algorithm in D
// Implementation of the QuickXorHash algorithm in D
// https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md
struct QuickXor
{
@ -71,18 +75,4 @@ struct QuickXor
}
return tmp;
}
}
unittest
{
assert(isDigest!QuickXor);
}
unittest
{
QuickXor qxor;
qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
}
alias QuickXorDigest = WrapperDigest!(QuickXor);
}


@ -1,422 +0,0 @@
import std.algorithm;
import std.array;
import std.file;
import std.path;
import std.regex;
import std.stdio;
import std.string;
import util;
import log;
final class SelectiveSync
{
private string[] paths;
private string[] businessSharedFoldersList;
private Regex!char mask;
private Regex!char dirmask;
private bool skipDirStrictMatch = false;
private bool skipDotfiles = false;
// load sync_list file
void load(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
paths ~= buildNormalizedPath(line);
}
file.close();
}
}
// Configure skipDirStrictMatch if function is called
// By default, skipDirStrictMatch = false;
void setSkipDirStrictMatch()
{
skipDirStrictMatch = true;
}
// load business_shared_folders file
void loadSharedFolders(string filepath)
{
if (exists(filepath)) {
// open file as read only
auto file = File(filepath, "r");
auto range = file.byLine();
foreach (line; range) {
// Skip comments in file
if (line.length == 0 || line[0] == ';' || line[0] == '#') continue;
businessSharedFoldersList ~= buildNormalizedPath(line);
}
file.close();
}
}
void setFileMask(const(char)[] mask)
{
this.mask = wild2regex(mask);
}
void setDirMask(const(char)[] dirmask)
{
this.dirmask = wild2regex(dirmask);
}
// Configure skipDotfiles if function is called
// By default, skipDotfiles = false;
void setSkipDotfiles()
{
skipDotfiles = true;
}
// return value of skipDotfiles
bool getSkipDotfiles()
{
return skipDotfiles;
}
// config file skip_dir parameter
bool isDirNameExcluded(string name)
{
// Does the directory name match skip_dir config entry?
// Returns true if the name matches a skip_dir config entry
// Returns false if no match
log.vdebug("skip_dir evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(dirmask).empty) {
log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched");
return true;
} else {
// Do we check the base name as well?
if (!skipDirStrictMatch) {
log.vdebug("No Strict Matching Enforced");
// Test the entire path working backwards from child
string path = buildNormalizedPath(name);
string checkPath;
auto paths = pathSplitter(path);
foreach_reverse(directory; paths) {
if (directory != "/") {
// This will add a leading '/' but that needs to be stripped to check
checkPath = "/" ~ directory ~ checkPath;
if(!checkPath.strip('/').matchFirst(dirmask).empty) {
log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched");
return true;
}
}
}
} else {
log.vdebug("Strict Matching Enforced - No Match");
}
}
// no match
return false;
}
// config file skip_file parameter
bool isFileNameExcluded(string name)
{
// Does the file name match skip_file config entry?
// Returns true if the name matches a skip_file config entry
// Returns false if no match
log.vdebug("skip_file evaluation for: ", name);
// Try full path match first
if (!name.matchFirst(mask).empty) {
return true;
} else {
// check just the file name
string filename = baseName(name);
if(!filename.matchFirst(mask).empty) {
return true;
}
}
// no match
return false;
}
// Match against sync_list only
bool isPathExcludedViaSyncList(string path)
{
// Debug output that we are performing a 'sync_list' inclusion / exclusion test
return .isPathExcluded(path, paths);
}
// Match against skip_dir, skip_file & sync_list entries
bool isPathExcludedMatchAll(string path)
{
return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask);
}
// is the path a dotfile?
bool isDotFile(string path)
{
// always allow the root
if (path == ".") return false;
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
foreach(base; paths) {
if (startsWith(base, ".")){
return true;
}
}
return false;
}
// is business shared folder matched
bool isSharedFolderMatched(string name)
{
// if there are no shared folders always return false
if (businessSharedFoldersList.empty) return false;
if (!name.matchFirst(businessSharedFoldersList).empty) {
return true;
} else {
// try a direct comparison just in case
foreach (userFolder; businessSharedFoldersList) {
if (userFolder == name) {
// direct match
log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name);
return true;
}
}
return false;
}
}
// is business shared folder included
bool isPathIncluded(string path, string[] allowedPaths)
{
// always allow the root
if (path == ".") return true;
// if there are no allowed paths always return true
if (allowedPaths.empty) return true;
path = buildNormalizedPath(path);
foreach (allowed; allowedPaths) {
auto comm = commonPrefix(path, allowed);
if (comm.length == path.length) {
// the given path is contained in an allowed path
return true;
}
if (comm.length == allowed.length && path[comm.length] == '/') {
// the given path is a subitem of an allowed path
return true;
}
}
return false;
}
}
// test if the given path is not included in the allowed paths
// if there are no allowed paths always return false
private bool isPathExcluded(string path, string[] allowedPaths)
{
// function variables
bool exclude = false;
bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry
bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry
bool finalResult = true; // will get updated to false, if pattern match to sync_list entry
int offset;
string wildcard = "*";
// always allow the root
if (path == ".") return false;
// if there are no allowed paths always return false
if (allowedPaths.empty) return false;
path = buildNormalizedPath(path);
log.vdebug("Evaluation against 'sync_list' for this path: ", path);
log.vdebug("[S]exclude = ", exclude);
log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[S]excludeMatched = ", excludeMatched);
// unless path is an exact match, entire sync_list entries need to be processed to ensure
// negative matches are also correctly detected
foreach (allowedPath; allowedPaths) {
// is this an inclusion path or finer grained exclusion?
switch (allowedPath[0]) {
case '-':
// sync_list path starts with '-', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '-/' offset needs to be 2, else 1
if (startsWith(allowedPath, "-/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '!':
// sync_list path starts with '!', this user wants to exclude this path
exclude = true;
// If the sync_list entry starts with '!/' offset needs to be 2, else 1
if (startsWith(allowedPath, "!/")){
// Offset needs to be 2
offset = 2;
} else {
// Offset needs to be 1
offset = 1;
}
break;
case '/':
// sync_list path starts with '/', this user wants to include this path
// but a '/' at the start causes matching issues, so use the offset for comparison
exclude = false;
offset = 1;
break;
default:
// no negative pattern, default is to not exclude
exclude = false;
offset = 0;
}
// What are we comparing against?
log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath);
// Generate the common prefix from the path vs the allowed path
auto comm = commonPrefix(path, allowedPath[offset..$]);
// Is the path an exact match of the allowed path?
if (comm.length == path.length) {
// we have a potential exact match
// strip any potential '/*' from the allowed path, to avoid a potential lesser common match
string strippedAllowedPath = strip(allowedPath[offset..$], "/*");
if (path == strippedAllowedPath) {
// we have an exact path match
log.vdebug("exact path match");
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded");
// do not set excludeMatched = true here, otherwise parental path also gets excluded
// flag exludeDirectMatch so that a 'wildcard match' will not override this exclude
exludeDirectMatch = true;
// final result
finalResult = true;
}
} else {
// no exact path match, but something common does match
log.vdebug("something 'common' matches the input path");
auto splitAllowedPaths = pathSplitter(strippedAllowedPath);
string pathToEvaluate = "";
foreach(base; splitAllowedPaths) {
pathToEvaluate ~= base;
if (path == pathToEvaluate) {
// The input path matches what we want to evaluate against as a direct match
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item");
finalResult = false;
// direct match, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded");
finalResult = true;
// do not set excludeMatched = true here, otherwise parental path also gets excluded
}
}
pathToEvaluate ~= dirSeparator;
}
}
}
// Is the path a subitem/sub-folder of the allowed path?
if (comm.length == allowedPath[offset..$].length) {
// The given path is potentially a subitem of an allowed path
// We want to capture sub-folders / files of allowed paths here, but not explicitly match other items
// if there is no wildcard
auto subItemPathCheck = allowedPath[offset..$] ~ "/";
if (canFind(path, subItemPathCheck)) {
// The 'path' includes the allowed path, and is 'most likely' a sub-path item
if (!exclude) {
log.vdebug("Evaluation against 'sync_list' result: parental path match");
finalResult = false;
// parental path matches, break and go sync
break;
} else {
log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
// Does the allowed path contain a wildcard? (*)
if (canFind(allowedPath[offset..$], wildcard)) {
// allowed path contains a wildcard
// manually replace '*' for '.*' to be compatible with regex
string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*");
auto allowedMask = regex(regexCompatiblePath);
if (matchAll(path, allowedMask)) {
// regex wildcard evaluation matches
// if we have a prior pattern match for an exclude, excludeMatched = true
if (!exclude && !excludeMatched && !exludeDirectMatch) {
// nothing triggered an exclusion before evaluation against wildcard match attempt
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match");
finalResult = false;
} else {
log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded");
finalResult = true;
excludeMatched = true;
}
}
}
}
// Interim results
log.vdebug("[F]exclude = ", exclude);
log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch);
log.vdebug("[F]excludeMatched = ", excludeMatched);
// If exclude or excludeMatched is true, then finalResult has to be true
if ((exclude) || (excludeMatched) || (exludeDirectMatch)) {
finalResult = true;
}
// results
if (finalResult) {
log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED");
} else {
log.vdebug("Evaluation against 'sync_list' final result: included for sync");
}
return finalResult;
}
// test if the given path is matched by the regex expression.
// recursively test up the tree.
private bool isPathMatched(string path, Regex!char mask) {
path = buildNormalizedPath(path);
auto paths = pathSplitter(path);
string prefix = "";
foreach(base; paths) {
prefix ~= base;
if (!path.matchFirst(mask).empty) {
// the given path matches something which we should skip
return true;
}
prefix ~= dirSeparator;
}
return false;
}
// unit tests
unittest
{
assert(isPathExcluded("Documents2", ["Documents"]));
assert(!isPathExcluded("Documents", ["Documents"]));
assert(!isPathExcluded("Documents/a.txt", ["Documents"]));
assert(isPathExcluded("Hello/World", ["Hello/John"]));
assert(!isPathExcluded(".", ["Documents"]));
}


@ -1,27 +1,29 @@
// What is this module called?
module sqlite;
// What does this module require to function?
import std.stdio;
import etc.c.sqlite3;
import std.string: fromStringz, toStringz;
import core.stdc.stdlib;
import std.conv;
static import log;
// What other modules that we have created do we need to import?
import log;
extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library
static this()
{
static this() {
if (sqlite3_libversion_number() < 3006019) {
throw new SqliteException("sqlite 3.6.19 or newer is required");
}
}
private string ifromStringz(const(char)* cstr)
{
private string ifromStringz(const(char)* cstr) {
return fromStringz(cstr).dup;
}
class SqliteException: Exception
{
class SqliteException: Exception {
@safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null)
{
super(msg, file, line, next);
@ -33,28 +35,23 @@ class SqliteException: Exception
}
}
struct Database
{
struct Database {
private sqlite3* pDb;
this(const(char)[] filename)
{
this(const(char)[] filename) {
open(filename);
}
~this()
{
~this() {
close();
}
int db_checkpoint()
{
int db_checkpoint() {
return sqlite3_wal_checkpoint(pDb, null);
}
void dump_open_statements()
{
log.log("Dumpint open statements: \n");
void dump_open_statements() {
log.log("Dumping open statements: \n");
auto p = sqlite3_next_stmt(pDb, null);
while (p != null) {
log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n");
@ -63,13 +60,12 @@ struct Database
}
void open(const(char)[] filename)
{
void open(const(char)[] filename) {
// https://www.sqlite.org/c3ref/open.html
int rc = sqlite3_open(toStringz(filename), &pDb);
if (rc == SQLITE_CANTOPEN) {
// Database cannot be opened
log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n");
log.error("\nThe database cannot be opened. Please check the permissions of " ~ filename ~ "\n");
close();
exit(-1);
}
@ -81,8 +77,7 @@ struct Database
sqlite3_extended_result_codes(pDb, 1); // always use extended result codes
}
void exec(const(char)[] sql)
{
void exec(const(char)[] sql) {
// https://www.sqlite.org/c3ref/exec.html
int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null);
if (rc != SQLITE_OK) {
@ -93,8 +88,7 @@ struct Database
}
}
int getVersion()
{
int getVersion() {
int userVersion;
extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) {
import core.stdc.stdlib: atoi;
@ -108,19 +102,16 @@ struct Database
return userVersion;
}
string getErrorMessage()
{
string getErrorMessage() {
return ifromStringz(sqlite3_errmsg(pDb));
}
void setVersion(int userVersion)
{
void setVersion(int userVersion) {
import std.conv: to;
exec("PRAGMA user_version=" ~ to!string(userVersion));
}
Statement prepare(const(char)[] zSql)
{
Statement prepare(const(char)[] zSql) {
Statement s;
// https://www.sqlite.org/c3ref/prepare.html
int rc = sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null);
@ -130,41 +121,34 @@ struct Database
return s;
}
void close()
{
void close() {
// https://www.sqlite.org/c3ref/close.html
sqlite3_close_v2(pDb);
pDb = null;
}
}
struct Statement
{
struct Result
{
struct Statement {
struct Result {
private sqlite3_stmt* pStmt;
private const(char)[][] row;
private this(sqlite3_stmt* pStmt)
{
private this(sqlite3_stmt* pStmt) {
this.pStmt = pStmt;
step(); // initialize the range
}
@property bool empty()
{
@property bool empty() {
return row.length == 0;
}
@property auto front()
{
@property auto front() {
return row;
}
alias step popFront;
void step()
{
void step() {
// https://www.sqlite.org/c3ref/step.html
int rc = sqlite3_step(pStmt);
if (rc == SQLITE_BUSY) {
@ -194,14 +178,12 @@ struct Statement
private sqlite3_stmt* pStmt;
~this()
{
~this() {
// https://www.sqlite.org/c3ref/finalize.html
sqlite3_finalize(pStmt);
}
void bind(int index, const(char)[] value)
{
void bind(int index, const(char)[] value) {
reset();
// https://www.sqlite.org/c3ref/bind_blob.html
int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC);
@ -210,47 +192,16 @@ struct Statement
}
}
Result exec()
{
Result exec() {
reset();
return Result(pStmt);
}
private void reset()
{
private void reset() {
// https://www.sqlite.org/c3ref/reset.html
int rc = sqlite3_reset(pStmt);
if (rc != SQLITE_OK) {
throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))));
}
}
}
unittest
{
auto db = Database(":memory:");
db.exec("CREATE TABLE test(
id TEXT PRIMARY KEY,
value TEXT
)");
assert(db.getVersion() == 0);
db.setVersion(1);
assert(db.getVersion() == 1);
auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
s.bind(1, "key1");
s.bind(2, "value");
s.exec();
s.bind(1, "key2");
s.bind(2, null);
s.exec();
s = db.prepare("SELECT * FROM test ORDER BY id ASC");
auto r = s.exec();
assert(r.front[0] == "key1");
r.popFront();
assert(r.front[1] == null);
r.popFront();
assert(r.empty);
}
}

12660 src/sync.d

File diff suppressed because it is too large


@ -1,302 +0,0 @@
import std.algorithm, std.conv, std.datetime, std.file, std.json;
import std.stdio, core.thread, std.string;
import progress, onedrive, util;
static import log;
private long fragmentSize = 10 * 2^^20; // 10 MiB
struct UploadSession
{
private OneDriveApi onedrive;
private bool verbose;
// https://dev.onedrive.com/resources/uploadSession.htm
private JSONValue session;
// path where to save the session
private string sessionFilePath;
this(OneDriveApi onedrive, string sessionFilePath)
{
assert(onedrive);
this.onedrive = onedrive;
this.sessionFilePath = sessionFilePath;
this.verbose = verbose;
}
JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null)
{
// Fix https://github.com/abraunegg/onedrive/issues/2
// More Details https://github.com/OneDrive/onedrive-api-docs/issues/778
SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC();
localFileLastModifiedTime.fracSecs = Duration.zero;
JSONValue fileSystemInfo = [
"item": JSONValue([
"@name.conflictBehavior": JSONValue("replace"),
"fileSystemInfo": JSONValue([
"lastModifiedDateTime": localFileLastModifiedTime.toISOExtString()
])
])
];
// Try to create the upload session for this file
session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo);
if ("uploadUrl" in session){
session["localPath"] = localPath;
save();
return upload();
} else {
// there was an error
log.vlog("Create file upload session failed ... skipping file upload");
// return upload() will return a JSONValue response, create an empty JSONValue response to return
JSONValue response;
return response;
}
}
/* Restore the previous upload session.
* Returns true if the session is valid. Call upload() to resume it.
* Returns false if there is no session or the session is expired. */
bool restore()
{
if (exists(sessionFilePath)) {
log.vlog("Trying to restore the upload session ...");
// We can't use a JSONType.object check, as this is currently a string
// We can't use a try & catch block, as it does not catch std.json.JSONException
auto sessionFileText = readText(sessionFilePath);
if(canFind(sessionFileText,"@odata.context")) {
session = readText(sessionFilePath).parseJSON();
} else {
log.vlog("Upload session resume data is invalid");
remove(sessionFilePath);
return false;
}
// Check the session resume file for expirationDateTime
if ("expirationDateTime" in session){
// expirationDateTime in the file
auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str);
if (expiration < Clock.currTime()) {
log.vlog("The upload session is expired");
return false;
}
if (!exists(session["localPath"].str)) {
log.vlog("The file does not exist anymore");
return false;
}
// Can we read the file - as a permissions issue or file corruption will cause a failure on resume
// https://github.com/abraunegg/onedrive/issues/113
if (readLocalFile(session["localPath"].str)){
// able to read the file
// request the session status
JSONValue response;
try {
response = onedrive.requestUploadStatus(session["uploadUrl"].str);
} catch (OneDriveException e) {
// handle any onedrive error response
if (e.httpStatusCode == 400) {
log.vlog("Upload session not found");
return false;
}
}
// do we have a valid response from OneDrive?
if (response.type() == JSONType.object){
// JSON object
if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){
// has the elements we need
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
if (session["nextExpectedRanges"].array.length == 0) {
log.vlog("The upload session is completed");
return false;
}
} else {
// bad data
log.vlog("Restore file upload session failed - invalid data response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// not a JSON object
log.vlog("Restore file upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
return true;
} else {
// unable to read the local file
log.vlog("Restore file upload session failed - unable to read the local file");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
} else {
// session file contains an error - can't resume
log.vlog("Restore file upload session failed - cleaning up session resume");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return false;
}
}
return false;
}
JSONValue upload()
{
// Response for upload
JSONValue response;
// session JSON needs to contain valid elements
long offset;
long fileSize;
if ("nextExpectedRanges" in session){
offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long;
}
if ("localPath" in session){
fileSize = getSize(session["localPath"].str);
}
if ("uploadUrl" in session){
// Upload file via session created
// Upload Progress Bar
size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1;
Progress p = new Progress(iteration);
p.title = "Uploading";
long fragmentCount = 0;
long fragSize = 0;
// Initialise the download bar at 0%
p.next();
while (true) {
fragmentCount++;
log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration);
p.next();
log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize );
fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset;
log.vdebugNewLine("Using fragSize: ", fragSize);
// fragSize must not be a negative value
if (fragSize < 0) {
// Session upload will fail
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid calculation of fragment size");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
// If the resume upload fails, we need to check for a return code here
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// if a 100 response is generated, continue
if (e.httpStatusCode == 100) {
continue;
}
// there was an error response from OneDrive when uploading the file fragment
// handle 'HTTP request returned status code 429 (Too Many Requests)' first
if (e.httpStatusCode == 429) {
auto retryAfterValue = onedrive.getRetryAfterValue();
log.vdebug("Fragment upload failed - received throttle request response from OneDrive");
log.vdebug("Using Retry-After Value = ", retryAfterValue);
// Sleep thread as per request
log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled");
log.log("Sleeping for ", retryAfterValue, " seconds");
Thread.sleep(dur!"seconds"(retryAfterValue));
log.log("Retrying fragment upload");
} else {
// insert a new line as well, so that the below error is inserted on the console in the right location
log.vlog("\nFragment upload failed - received an exception response from OneDrive");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// retry fragment upload in case error is transient
log.vlog("Retrying fragment upload");
}
try {
response = onedrive.uploadFragment(
session["uploadUrl"].str,
session["localPath"].str,
offset,
fragSize,
fileSize
);
} catch (OneDriveException e) {
// OneDrive threw another error on retry
log.vlog("Retry to upload fragment failed");
// display what the error is
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
// set response to null as the fragment upload was in error twice
response = null;
}
}
// was the fragment uploaded without issue?
if (response.type() == JSONType.object){
offset += fragmentSize;
if (offset >= fileSize) break;
// update the session details
session["expirationDateTime"] = response["expirationDateTime"];
session["nextExpectedRanges"] = response["nextExpectedRanges"];
save();
} else {
// not a JSON object - fragment upload failed
log.vlog("File upload session failed - invalid response from OneDrive");
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
// set response to null as error
response = null;
return response;
}
}
// upload complete
p.next();
writeln();
if (exists(sessionFilePath)) {
remove(sessionFilePath);
}
return response;
} else {
// session elements were not present
log.vlog("Session has no valid upload URL ... skipping this file upload");
// return an empty JSON response
response = null;
return response;
}
}
string getUploadSessionLocalFilePath() {
// return the session file path
string localPath = "";
if ("localPath" in session){
localPath = session["localPath"].str;
}
return localPath;
}
// save session details to temp file
private void save()
{
std.file.write(sessionFilePath, session.toString());
}
}


@ -1,6 +1,12 @@
// What is this module called?
module util;
// What does this module require to function?
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit;
import std.base64;
import std.conv;
import std.digest.crc, std.digest.sha;
import std.digest.crc;
import std.digest.sha;
import std.net.curl;
import std.datetime;
import std.file;
@ -13,22 +19,24 @@ import std.algorithm;
import std.uri;
import std.json;
import std.traits;
import qxor;
import core.stdc.stdlib;
import core.thread;
// What other modules that we have created do we need to import?
import log;
import config;
import qxor;
import curlEngine;
// module variables
shared string deviceName;
static this()
{
static this() {
deviceName = Socket.hostName;
}
// gives a new name to the specified file or directory
void safeRename(const(char)[] path)
{
// Create a safe backup of the given item, performing the rename only when not in a --dry-run scenario
void safeBackup(const(char)[] path, bool dryRun) {
auto ext = extension(path);
auto newPath = path.chomp(ext) ~ "-" ~ deviceName;
if (exists(newPath ~ ext)) {
@ -41,18 +49,55 @@ void safeRename(const(char)[] path)
newPath = newPath2;
}
newPath ~= ext;
rename(path, newPath);
// Perform the backup
log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath);
if (!dryRun) {
rename(path, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file backup");
}
}
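As a concrete example of the naming scheme above (hostname illustrative): on a device named mypc, safeBackup("./report.txt", false) renames the file to ./report-mypc.txt, with further suffixing handled by the collision loop in the elided hunk when that name is already taken, while safeBackup("./report.txt", true) only logs that the backup was skipped.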
// Rename the given item, performing the rename only when not in a --dry-run scenario
void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) {
// Perform the rename
if (!dryRun) {
log.vdebug("Calling rename(oldPath, newPath)");
// rename physical path on disk
rename(oldPath, newPath);
} else {
log.vdebug("DRY-RUN: Skipping local file rename");
}
}
// deletes the specified file without throwing an exception if it does not exist
void safeRemove(const(char)[] path)
{
void safeRemove(const(char)[] path) {
if (exists(path)) remove(path);
}
// returns the CRC32 hex string of a file
string computeCRC32(string path) {
CRC32 crc;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
crc.put(data);
}
return crc.finish().toHexString().dup;
}
// returns the SHA1 hash hex string of a file
string computeSha1Hash(string path) {
SHA1 sha;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
sha.put(data);
}
return sha.finish().toHexString().dup;
}
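All three digest helpers stream the file in 4096-byte chunks, so memory use stays flat regardless of file size. A quick usage sketch (path illustrative):
// Hypothetical usage, not part of the patch
string crc32  = computeCRC32("./data.bin");
string sha1   = computeSha1Hash("./data.bin");
string qxHash = computeQuickXorHash("./data.bin");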
// returns the quickXorHash base64 string of a file
string computeQuickXorHash(string path)
{
string computeQuickXorHash(string path) {
QuickXor qxor;
auto file = File(path, "rb");
foreach (ubyte[] data; chunks(file, 4096)) {
@ -72,8 +117,7 @@ string computeSHA256Hash(string path) {
}
// converts wildcards (*, ?) to regex
Regex!char wild2regex(const(char)[] pattern)
{
Regex!char wild2regex(const(char)[] pattern) {
string str;
str.reserve(pattern.length + 2);
str ~= "^";
@ -115,53 +159,93 @@ Regex!char wild2regex(const(char)[] pattern)
return regex(str, "i");
}
// returns true if the network connection is available
bool testNetwork(Config cfg)
{
// Use low level HTTP struct
auto http = HTTP();
http.url = "https://login.microsoftonline.com";
// DNS lookup timeout
http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout")));
// Timeout for connecting
http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout")));
// Data Timeout for HTTPS connections
http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout")));
// maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout")));
// What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6
http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// Test Internet access to Microsoft OneDrive
bool testInternetReachability(ApplicationConfig appConfig) {
// Use preconfigured object with all the correct http values assigned
auto curlEngine = new CurlEngine();
curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"));
// Configure the remaining items required
// URL to use
curlEngine.http.url = "https://login.microsoftonline.com";
// HTTP connection test method
http.method = HTTP.Method.head;
curlEngine.http.method = HTTP.Method.head;
// Attempt to contact the Microsoft Online Service
try {
log.vdebug("Attempting to contact online service");
http.perform();
log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Online Service");
http.shutdown();
log.vdebug("Attempting to contact Microsoft OneDrive Login Service");
curlEngine.http.perform();
log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Login Service");
curlEngine.http.shutdown();
// Free object and memory
object.destroy(curlEngine);
return true;
} catch (SocketException e) {
// Socket issue
log.vdebug("HTTP Socket Issue");
log.error("Cannot connect to Microsoft OneDrive Service - Socket Issue");
log.error("Cannot connect to Microsoft OneDrive Login Service - Socket Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
} catch (CurlException e) {
// No network connection to OneDrive Service
log.vdebug("No Network Connection");
log.error("Cannot connect to Microsoft OneDrive Service - Network Connection Issue");
log.error("Cannot connect to Microsoft OneDrive Login Service - Network Connection Issue");
displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
return false;
}
}
// Retry Internet access test to Microsoft OneDrive
bool retryInternetConnectivtyTest(ApplicationConfig appConfig) {
// re-try network connection to OneDrive
// https://github.com/abraunegg/onedrive/issues/1184
// Back off & retry with incremental delay
int retryCount = 10000;
int retryAttempts = 1;
int backoffInterval = 1;
int maxBackoffInterval = 3600;
bool onlineRetry = false;
bool retrySuccess = false;
while (!retrySuccess){
// retry to access OneDrive API
backoffInterval++;
int thisBackOffInterval = retryAttempts*backoffInterval;
log.vdebug(" Retry Attempt: ", retryAttempts);
if (thisBackOffInterval <= maxBackoffInterval) {
log.vdebug(" Retry In (seconds): ", thisBackOffInterval);
Thread.sleep(dur!"seconds"(thisBackOffInterval));
} else {
log.vdebug(" Retry In (seconds): ", maxBackoffInterval);
Thread.sleep(dur!"seconds"(maxBackoffInterval));
}
// perform the re-try
onlineRetry = testInternetReachability(appConfig);
if (onlineRetry) {
// We are now online
log.log("Internet connectivity to Microsoft OneDrive service has been restored");
retrySuccess = true;
} else {
// We are still offline
if (retryAttempts == retryCount) {
// we have attempted to re-connect X number of times
// set this to true to break out of the while loop
retrySuccess = true;
}
}
// Increment & loop around
retryAttempts++;
}
if (!onlineRetry) {
// Not online after 1.2 years of trying
log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!");
}
// return the state
return onlineRetry;
}
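The back-off above grows roughly quadratically: attempt 1 sleeps 1 x 2 = 2 seconds, attempt 2 sleeps 2 x 3 = 6, attempt 3 sleeps 3 x 4 = 12, and so on, until the per-attempt delay hits the 3600-second cap around attempt 60 (n x (n+1) first exceeds 3600 near n = 59). From there each remaining attempt sleeps a full hour, which is where the '1.2 years' figure in the error message comes from: 10000 x 3600 s is roughly 416 days.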
// Can we read the file - as a permissions issue or file corruption will cause a failure
// https://github.com/abraunegg/onedrive/issues/113
// returns true if file can be accessed
bool readLocalFile(string path)
{
bool readLocalFile(string path) {
try {
// attempt to read up to the first 1 byte of the file
// validates we can 'read' the file based on file permissions
@ -175,8 +259,7 @@ bool readLocalFile(string path)
}
// calls globMatch for each string in pattern separated by '|'
bool multiGlobMatch(const(char)[] path, const(char)[] pattern)
{
bool multiGlobMatch(const(char)[] path, const(char)[] pattern) {
foreach (glob; pattern.split('|')) {
if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) {
return true;
@ -185,8 +268,7 @@ bool multiGlobMatch(const(char)[] path, const(char)[] pattern)
return false;
}
bool isValidName(string path)
{
bool isValidName(string path) {
// Restriction and limitations about windows naming files
// https://msdn.microsoft.com/en-us/library/aa365247
// https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders
@ -223,8 +305,7 @@ bool isValidName(string path)
return matched;
}
bool containsBadWhiteSpace(string path)
{
bool containsBadWhiteSpace(string path) {
// allow root item
if (path == ".") {
return true;
@ -248,8 +329,7 @@ bool containsBadWhiteSpace(string path)
return m.empty;
}
bool containsASCIIHTMLCodes(string path)
{
bool containsASCIIHTMLCodes(string path) {
// https://github.com/abraunegg/onedrive/issues/151
// If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error
// Check if the filename contains an ASCII HTML code sequence
@ -265,17 +345,13 @@ bool containsASCIIHTMLCodes(string path)
}
// Parse and display error message received from OneDrive
void displayOneDriveErrorMessage(string message, string callingFunction)
{
void displayOneDriveErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: Microsoft OneDrive API returned an error with the following message:");
auto errorArray = splitLines(message);
log.error(" Error Message: ", errorArray[0]);
// Extract 'message' as the reason
JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
// extra debug
log.vdebug("Raw Error Data: ", message);
log.vdebug("JSON Message: ", errorMessage);
// What is the reason for the error
if (errorMessage.type() == JSONType.object) {
@ -332,12 +408,48 @@ void displayOneDriveErrorMessage(string message, string callingFunction)
}
// Where in the code was this error generated
log.vlog(" Calling Function: ", callingFunction);
log.log(" Calling Function: ", callingFunction);
// Extra Debug if we are using --verbose --verbose
log.vdebug("Raw Error Data: ", message);
log.vdebug("JSON Message: ", errorMessage);
}
+// Common code for handling when a client is unauthorised
+void handleClientUnauthorised(int httpStatusCode, string message) {
+// Split the lines of the error message
+auto errorArray = splitLines(message);
+// Extract 'message' as the reason
+JSONValue errorMessage = parseJSON(replace(message, errorArray[0], ""));
+log.vdebug("errorMessage: ", errorMessage);
+if (httpStatusCode == 400) {
+// bad request or a new auth token is needed
+// configure the error reason
+writeln();
+string[] errorReason = splitLines(errorMessage["error_description"].str);
+log.errorAndNotify(errorReason[0]);
+writeln();
+log.errorAndNotify("ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token.");
+writeln();
+}
+if (httpStatusCode == 401) {
+writeln("CODING TO DO: Triggered a 401 HTTP unauthorised response when client was unauthorised");
+writeln();
+log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. You may need to issue a --reauth and re-authorise this client.");
+writeln();
+}
+// Must exit here
+exit(EXIT_FAILURE);
+}
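// A minimal sketch of the intended call site (the function and variable names
// here are assumptions, not part of this module): on an HTTP 400 or 401 the
// client must re-authenticate, so the status code and raw response body are
// handed over and handleClientUnauthorised() exits the client.
void onApiAuthFailure(int httpStatusCode, string responseBody) {
    if ((httpStatusCode == 400) || (httpStatusCode == 401)) {
        handleClientUnauthorised(httpStatusCode, responseBody);
    }
}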
// Parse and display error message received from the local file system
-void displayFileSystemErrorMessage(string message, string callingFunction)
-{
+void displayFileSystemErrorMessage(string message, string callingFunction) {
writeln();
log.error("ERROR: The local file system returned an error with the following message:");
auto errorArray = splitLines(message);
@@ -349,10 +461,17 @@ void displayFileSystemErrorMessage(string message, string callingFunction)
ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace("."));
if (localActualFreeSpace == 0) {
// force exit
-exit(-1);
+exit(EXIT_FAILURE);
}
}
+// Display the POSIX Error Message
+void displayPosixErrorMessage(string message) {
+writeln();
+log.error("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:");
+log.error(" Error Message: ", message);
+}
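// A minimal usage sketch (the message text is illustrative only): POSIX file
// systems are case-sensitive while OneDrive is not, so two local names that
// differ only by case cannot both exist online, and the API flags the clash.
unittest {
    displayPosixErrorMessage("item 'File.txt' conflicts with existing item 'file.txt'");
}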
// Get the function name that is being called to assist with identifying where an error is being generated
string getFunctionName(alias func)() {
return __traits(identifier, __traits(parent, func)) ~ "()\n";
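// A minimal usage sketch: calling getFunctionName!({}) from inside another
// function makes __traits(parent, func) resolve to that caller, so the
// template yields the caller's name plus "()" and a newline (exampleCaller
// is an illustrative name):
//
//   void exampleCaller() {
//       write(getFunctionName!({}));   // prints "exampleCaller()\n"
//   }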
@@ -527,7 +646,7 @@ void checkApplicationVersion() {
thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1);
log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod);
-// is this running version obsolete ?
+// Is this running version obsolete ?
if (!displayObsolete) {
// if releaseGracePeriod > currentTime
// display an information warning that there is a new release available
@@ -556,54 +675,110 @@ void checkApplicationVersion() {
}
}
-// Unit Tests
-unittest
-{
-assert(multiGlobMatch(".hidden", ".*"));
-assert(multiGlobMatch(".hidden", "file|.*"));
-assert(!multiGlobMatch("foo.bar", "foo|bar"));
-// that should detect invalid file/directory name.
-assert(isValidName("."));
-assert(isValidName("./general.file"));
-assert(!isValidName("./ leading_white_space"));
-assert(!isValidName("./trailing_white_space "));
-assert(!isValidName("./trailing_dot."));
-assert(!isValidName("./includes<in the path"));
-assert(!isValidName("./includes>in the path"));
-assert(!isValidName("./includes:in the path"));
-assert(!isValidName(`./includes"in the path`));
-assert(!isValidName("./includes|in the path"));
-assert(!isValidName("./includes?in the path"));
-assert(!isValidName("./includes*in the path"));
-assert(!isValidName("./includes / in the path"));
-assert(!isValidName(`./includes\ in the path`));
-assert(!isValidName(`./includes\\ in the path`));
-assert(!isValidName(`./includes\\\\ in the path`));
-assert(!isValidName("./includes\\ in the path"));
-assert(!isValidName("./includes\\\\ in the path"));
-assert(!isValidName("./CON"));
-assert(!isValidName("./CON.text"));
-assert(!isValidName("./PRN"));
-assert(!isValidName("./AUX"));
-assert(!isValidName("./NUL"));
-assert(!isValidName("./COM0"));
-assert(!isValidName("./COM1"));
-assert(!isValidName("./COM2"));
-assert(!isValidName("./COM3"));
-assert(!isValidName("./COM4"));
-assert(!isValidName("./COM5"));
-assert(!isValidName("./COM6"));
-assert(!isValidName("./COM7"));
-assert(!isValidName("./COM8"));
-assert(!isValidName("./COM9"));
-assert(!isValidName("./LPT0"));
-assert(!isValidName("./LPT1"));
-assert(!isValidName("./LPT2"));
-assert(!isValidName("./LPT3"));
-assert(!isValidName("./LPT4"));
-assert(!isValidName("./LPT5"));
-assert(!isValidName("./LPT6"));
-assert(!isValidName("./LPT7"));
-assert(!isValidName("./LPT8"));
-assert(!isValidName("./LPT9"));
-}
+bool hasId(JSONValue item) {
+return ("id" in item) != null;
+}
+bool hasQuota(JSONValue item) {
+return ("quota" in item) != null;
+}
+bool isItemDeleted(JSONValue item) {
+return ("deleted" in item) != null;
+}
+bool isItemRoot(JSONValue item) {
+return ("root" in item) != null;
+}
+bool hasParentReference(const ref JSONValue item) {
+return ("parentReference" in item) != null;
+}
+bool hasParentReferenceId(JSONValue item) {
+return ("id" in item["parentReference"]) != null;
+}
+bool hasParentReferencePath(JSONValue item) {
+return ("path" in item["parentReference"]) != null;
+}
+bool isFolderItem(const ref JSONValue item) {
+return ("folder" in item) != null;
+}
+bool isFileItem(const ref JSONValue item) {
+return ("file" in item) != null;
+}
+bool isItemRemote(const ref JSONValue item) {
+return ("remoteItem" in item) != null;
+}
+bool isItemFile(const ref JSONValue item) {
+return ("file" in item) != null;
+}
+bool isItemFolder(const ref JSONValue item) {
+return ("folder" in item) != null;
+}
+bool hasFileSize(const ref JSONValue item) {
+return ("size" in item) != null;
+}
+bool isDotFile(const(string) path) {
+// always allow the root
+if (path == ".") return false;
+auto paths = pathSplitter(buildNormalizedPath(path));
+foreach(base; paths) {
+if (startsWith(base, ".")){
+return true;
+}
+}
+return false;
+}
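// A minimal sketch of isDotFile() semantics: any dot-prefixed component in the
// normalised path marks the whole path as hidden, while the root '.' is always
// allowed (the paths below are illustrative only):
unittest {
    assert(!isDotFile("."));
    assert(isDotFile("./.config/settings"));   // leading component is a dot directory
    assert(isDotFile("./documents/.hidden"));  // nested dot file also matches
    assert(!isDotFile("./documents/visible")); // no dot components anywhere
}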
+bool isMalware(const ref JSONValue item) {
+return ("malware" in item) != null;
+}
+bool hasHashes(const ref JSONValue item) {
+return ("hashes" in item["file"]) != null;
+}
+bool hasQuickXorHash(const ref JSONValue item) {
+return ("quickXorHash" in item["file"]["hashes"]) != null;
+}
+bool hasSHA256Hash(const ref JSONValue item) {
+return ("sha256Hash" in item["file"]["hashes"]) != null;
+}
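// A minimal sketch of the intended guard order (the JSON payload below is
// illustrative): hasHashes() must be checked first, because the two hash
// guards index item["file"]["hashes"] directly and would throw on a missing key.
unittest {
    JSONValue item = parseJSON(`{"file": {"hashes": {"quickXorHash": "abc123=="}}}`);
    if (hasHashes(item)) {
        assert(hasQuickXorHash(item));
        assert(!hasSHA256Hash(item));
    }
}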
+bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) {
+return (item["file"]["mimeType"].str) == "application/msonenote";
+}
+bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) {
+return (item["file"]["mimeType"].str) == "application/octet-stream";
+}
+bool hasUploadURL(const ref JSONValue item) {
+return ("uploadUrl" in item) != null;
+}
+bool hasNextExpectedRanges(const ref JSONValue item) {
+return ("nextExpectedRanges" in item) != null;
+}
+bool hasLocalPath(const ref JSONValue item) {
+return ("localPath" in item) != null;
+}
+bool hasETag(const ref JSONValue item) {
+return ("eTag" in item) != null;
+}
+bool hasSharedElement(const ref JSONValue item) {
+return ("shared" in item) != null;
+}