Release files for 2.5.0-rc2

* Release files for 2.5.0-rc2
* Code changes from 2.5.0-rc1 --> 2.5.0-rc2 (#2686)
* Update docs regarding Ubuntu 24.04
This commit is contained in:
abraunegg 2024-04-28 17:18:25 +10:00
parent 0f012b9f82
commit be3d28bf3c
15 changed files with 1837 additions and 2168 deletions

20
configure vendored
View file

@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-rc1.
# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-rc2.
#
# Report bugs to <https://github.com/abraunegg/onedrive>.
#
@ -579,8 +579,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='onedrive'
PACKAGE_TARNAME='onedrive'
PACKAGE_VERSION='v2.5.0-rc1'
PACKAGE_STRING='onedrive v2.5.0-rc1'
PACKAGE_VERSION='v2.5.0-rc2'
PACKAGE_STRING='onedrive v2.5.0-rc2'
PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive'
PACKAGE_URL=''
@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
\`configure' configures onedrive v2.5.0-rc1 to adapt to many kinds of systems.
\`configure' configures onedrive v2.5.0-rc2 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@ -1280,7 +1280,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
short | recursive ) echo "Configuration of onedrive v2.5.0-rc1:";;
short | recursive ) echo "Configuration of onedrive v2.5.0-rc2:";;
esac
cat <<\_ACEOF
@ -1393,7 +1393,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
onedrive configure v2.5.0-rc1
onedrive configure v2.5.0-rc2
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by onedrive $as_me v2.5.0-rc1, which was
It was created by onedrive $as_me v2.5.0-rc2, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@ -2162,7 +2162,7 @@ fi
PACKAGE_DATE="March 2024"
PACKAGE_DATE="April 2024"
@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
This file was extended by onedrive $as_me v2.5.0-rc1, which was
This file was extended by onedrive $as_me v2.5.0-rc2, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@ -3212,7 +3212,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
onedrive config.status v2.5.0-rc1
onedrive config.status v2.5.0-rc2
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"

View file

@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure)
dnl - tag the release
AC_PREREQ([2.69])
AC_INIT([onedrive],[v2.5.0-rc1], [https://github.com/abraunegg/onedrive], [onedrive])
AC_INIT([onedrive],[v2.5.0-rc2], [https://github.com/abraunegg/onedrive], [onedrive])
AC_CONFIG_SRCDIR([src/main.d])

View file

@ -206,7 +206,7 @@ _**Description:**_ This setting controls the timeout duration, in seconds, for w
_**Value Type:**_ Integer
_**Default Value:**_ 240
_**Default Value:**_ 60
_**Config Example:**_ `data_timeout = "300"`

View file

@ -31,6 +31,7 @@ Only the current release version or greater is supported. Earlier versions are n
| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |<a href="https://packages.ubuntu.com/focal/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_20_04/onedrive.svg?header=" alt="Ubuntu 20.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |<a href="https://packages.ubuntu.com/jammy/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_22_04/onedrive.svg?header=" alt="Ubuntu 22.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |<a href="https://packages.ubuntu.com/lunar/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_23_04/onedrive.svg?header=" alt="Ubuntu 23.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Ubuntu 24.04 | [onedrive](https://packages.ubuntu.com/noble/onedrive) |<a href="https://packages.ubuntu.com/noble/onedrive"><img src="https://repology.org/badge/version-for-repo/ubuntu_24_04/onedrive.svg?header=" alt="Ubuntu 24.04 package" width="46" height="20"></a> |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe as the package is obsolete and is not supported<br><br>For a supported application version, it is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
| Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |<a href="https://voidlinux.org/packages/?arch=x86_64&q=onedrive"><img src="https://repology.org/badge/version-for-repo/void_x86_64/onedrive.svg?header=" alt="Void Linux x86_64 package" width="46" height="20"></a>|✔|✔|❌|❌| |
## Building from Source - High Level Requirements
@ -61,7 +62,7 @@ Ubuntu Linux 18.x LTS reached the end of its five-year LTS window on May 31th 20
### Dependencies: Debian 9
Debian 9 reached the end of its five-year support window on June 30th 2022 and is no longer supported.
### Dependencies: Ubuntu 20.x -> Ubuntu 23.x / Debian 10 -> Debian 12 - x86_64
### Dependencies: Ubuntu 20.x -> Ubuntu 24.x / Debian 10 -> Debian 12 - x86_64
These dependencies are also applicable for all Ubuntu based distributions such as:
* Lubuntu
* Linux Mint

View file

@ -171,6 +171,7 @@ If required, review the table below based on your 'lsb_release' information to p
| Ubuntu 22.10 / Kinetic | Use [Ubuntu 22.10](#distribution-ubuntu-2210) instructions below |
| Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below |
| Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below |
| Ubuntu 24.04 / Noble | Use [Ubuntu 24.04](#distribution-ubuntu-2404) instructions below |
> [!IMPORTANT]
> If your Linux distribution and release is not in the table above, you have 2 options:
@ -423,6 +424,32 @@ Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
### Distribution: Ubuntu 24.04
The packages support the following platform architectures:
| &nbsp;i686&nbsp; | x86_64 | ARMHF | AARCH64 |
|:----:|:------:|:-----:|:-------:|
|❌|✔|❌|✔|
#### Step 1: Add the OpenSuSE Build Service repository release key
Add the OpenSuSE Build Service repository release key using the following command:
```text
wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_24.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
```
#### Step 2: Add the OpenSuSE Build Service repository
Add the OpenSuSE Build Service repository using the following command:
```text
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_24.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
```
#### Step 3: Update your apt package cache
Run: `sudo apt-get update`
#### Step 4: Install 'onedrive'
Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive`
#### Step 5: Read 'Known Issues' with these packages
Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed.
## Known Issues with Installing from the above packages

View file

@ -30,6 +30,13 @@ class ClientSideFiltering {
this.appConfig = appConfig;
}
~this() {
object.destroy(appConfig);
object.destroy(paths);
object.destroy(fileMask);
object.destroy(directoryMask);
}
// Initialise the required items
bool initialise() {
// Log what is being done

View file

@ -61,17 +61,17 @@ class ApplicationConfig {
// HTTP Struct items, used for configuring HTTP()
// Curl Timeout Handling
// libcurl dns_cache_timeout timeout
immutable int defaultDnsTimeout = 60;
immutable int defaultDnsTimeout = 60; // in seconds
// Connect timeout for HTTP|HTTPS connections
// Controls CURLOPT_CONNECTTIMEOUT
immutable int defaultConnectTimeout = 10;
// Default data timeout for HTTP
immutable int defaultConnectTimeout = 10; // in seconds
// Default data timeout for HTTP operations
// curl.d has a default of: _defaultDataTimeout = dur!"minutes"(2);
immutable int defaultDataTimeout = 240;
immutable int defaultDataTimeout = 60; // in seconds
// Maximum time any operation is allowed to take
// This includes dns resolution, connecting, data transfer, etc.
// Controls CURLOPT_TIMEOUT
immutable int defaultOperationTimeout = 3600;
immutable int defaultOperationTimeout = 3600; // in seconds
// Specify what IP protocol version should be used when communicating with OneDrive
immutable int defaultIpProtocol = 0; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only
// Specify how many redirects should be allowed
@ -682,11 +682,22 @@ class ApplicationConfig {
}
auto file = File(filename, "r");
scope(exit) file.close();
scope(failure) file.close();
string lineBuffer;
scope(exit) {
file.close();
object.destroy(file);
object.destroy(lineBuffer);
}
scope(failure) {
file.close();
object.destroy(file);
object.destroy(lineBuffer);
}
foreach (line; file.byLine()) {
string lineBuffer = stripLeft(line).to!string;
lineBuffer = stripLeft(line).to!string;
if (lineBuffer.empty || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue;
auto c = lineBuffer.matchFirst(configRegex);
if (c.empty) {

View file

@ -3,7 +3,7 @@ module curlEngine;
// What does this module require to function?
import std.net.curl;
import etc.c.curl: CurlOption;
import etc.c.curl;
import std.datetime;
import std.conv;
import std.file;
@ -13,6 +13,7 @@ import std.range;
// What other modules that we have created do we need to import?
import log;
import util;
class CurlResponse {
HTTP.Method method;
@ -20,23 +21,28 @@ class CurlResponse {
const(char)[][const(char)[]] requestHeaders;
const(char)[] postBody;
bool hasResponse;
string[string] responseHeaders;
HTTP.StatusLine statusLine;
char[] content;
this() {
reset();
}
void reset() {
method = HTTP.Method.undefined;
url = null;
url = "";
requestHeaders = null;
postBody = null;
postBody = [];
hasResponse = false;
responseHeaders = null;
object.destroy(statusLine);
content = null;
statusLine.reset();
content = [];
}
void addRequestHeader(const(char)[] name, const(char)[] value) {
requestHeaders[name] = value;
requestHeaders[to!string(name)] = to!string(value);
}
void connect(HTTP.Method method, const(char)[] url) {
@ -56,6 +62,7 @@ class CurlResponse {
};
void update(HTTP *http) {
hasResponse = true;
this.responseHeaders = http.responseHeaders();
this.statusLine = http.statusLine;
}
@ -65,40 +72,48 @@ class CurlResponse {
}
// Return the current value of retryAfterValue
ulong getRetryAfterValue() {
ulong delayBeforeRetry;
// is retry-after in the response headers
int getRetryAfterValue() {
int delayBeforeRetry;
// Is 'retry-after' in the response headers
if ("retry-after" in responseHeaders) {
// Set the retry-after value
addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]);
addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]);
delayBeforeRetry = to!ulong(responseHeaders["retry-after"]);
delayBeforeRetry = to!int(responseHeaders["retry-after"]);
} else {
// Use a 120 second delay as a default given header value was zero
// This value is based on log files and data when determining correct process for 429 response handling
delayBeforeRetry = 120;
// Update that we are over-riding the provided value with a default
addLogEntry("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]);
addLogEntry("HTTP Response Header retry-after value was missing - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]);
}
return delayBeforeRetry; // default to 60 seconds
return delayBeforeRetry;
}
const string parseHeaders(const(string[string]) headers) {
string responseHeadersStr = "";
foreach (const(char)[] header; headers.byKey()) {
responseHeadersStr ~= "> " ~ header ~ ": " ~ headers[header] ~ "\n";
}
return responseHeadersStr;
}
const string parseHeaders(const(const(char)[][const(char)[]]) headers) {
string responseHeadersStr = "";
const string parseRequestHeaders(const(const(char)[][const(char)[]]) headers) {
string requestHeadersStr = "";
foreach (string header; headers.byKey()) {
if (header == "Authorization")
if (header == "Authorization") {
continue;
responseHeadersStr ~= "< " ~ header ~ ": " ~ headers[header] ~ "\n";
}
// Use the 'in' operator to safely check if the key exists in the associative array.
if (auto val = header in headers) {
requestHeadersStr ~= "< " ~ header ~ ": " ~ *val ~ "\n";
}
}
return requestHeadersStr;
}
const string parseResponseHeaders(const(immutable(char)[][immutable(char)[]]) headers) {
string responseHeadersStr = "";
// Ensure response headers is not null and iterate over keys safely.
if (headers !is null) {
foreach (const(char)[] header; headers.byKey()) {
// Check if the key actually exists before accessing it to avoid RangeError.
if (auto val = header in headers) { // 'in' checks for the key and returns a pointer to the value if found.
responseHeadersStr ~= "> " ~ header ~ ": " ~ *val ~ "\n"; // Dereference pointer to get the value.
}
}
}
return responseHeadersStr;
}
@ -110,14 +125,14 @@ class CurlResponse {
string str = "";
str ~= format("< %s %s\n", method, url);
if (!requestHeaders.empty) {
str ~= parseHeaders(requestHeaders);
str ~= parseRequestHeaders(requestHeaders);
}
if (!postBody.empty) {
str ~= format("----\n%s\n----\n", postBody);
str ~= format("\n----\n%s\n----\n", postBody);
}
str ~= format("< %s\n", statusLine);
if (!responseHeaders.empty) {
str ~= parseHeaders(responseHeaders);
str ~= parseResponseHeaders(responseHeaders);
}
return str;
}
@ -128,7 +143,7 @@ class CurlResponse {
string str = "";
if (!content.empty) {
str ~= format("----\n%s\n----\n", content);
str ~= format("\n----\n%s\n----\n", content);
}
return str;
}
@ -136,74 +151,121 @@ class CurlResponse {
override string toString() const {
string str = "Curl debugging: \n";
str ~= dumpDebug();
str ~= "Curl response: \n";
str ~= dumpResponse();
if (hasResponse) {
str ~= "Curl response: \n";
str ~= dumpResponse();
}
return str;
}
CurlResponse dup() {
CurlResponse copy = new CurlResponse();
copy.method = method;
copy.url = url;
copy.requestHeaders = requestHeaders;
copy.postBody = postBody;
copy.responseHeaders = responseHeaders;
copy.statusLine = statusLine;
copy.content = content;
return copy;
}
}
class CurlEngine {
__gshared CurlEngine[] curlEnginePool;
static CurlEngine get() {
synchronized(CurlEngine.classinfo) {
if (curlEnginePool.empty) {
return new CurlEngine;
} else {
CurlEngine curlEngine = curlEnginePool[$-1];
curlEnginePool.popBack();
return curlEngine;
}
}
}
static releaseAll() {
synchronized(CurlEngine.classinfo) {
foreach(curlEngine; curlEnginePool) {
curlEngine.shutdown();
object.destroy(curlEngine);
}
curlEnginePool = null;
}
}
void release() {
cleanUp();
synchronized(CurlEngine.classinfo) {
curlEnginePool ~= this;
}
}
// Shared pool of CurlEngine instances accessible across all threads
__gshared CurlEngine[] curlEnginePool; // __gshared is used to declare a variable that is shared across all threads
HTTP http;
File uploadFile;
CurlResponse response;
bool keepAlive;
ulong dnsTimeout;
CurlResponse response;
this() {
http = HTTP();
response = new CurlResponse();
}
string internalThreadId;
this() {
http = HTTP(); // Directly initializes HTTP using its default constructor
response = null; // Initialize as null
internalThreadId = generateAlphanumericString();
}
// The destructor should only clean up resources owned directly by this instance
~this() {
object.destroy(http);
object.destroy(response);
// Is the file still open?
if (uploadFile.isOpen()) {
uploadFile.close();
}
// Is 'response' cleared?
if (response !is null) {
object.destroy(response); // Destroy, then set to null
response = null;
}
// Is the actual http instance is stopped?
if (!http.isStopped) {
// HTTP instance was not stopped .. need to stop it
http.shutdown();
object.destroy(http); // Destroy, however we cant set to null
}
}
static CurlEngine getCurlInstance() {
synchronized (CurlEngine.classinfo) {
// What is the current pool size
addLogEntry("CURL ENGINE AVAILABLE POOL SIZE: " ~ to!string(curlEnginePool.length), ["debug"]);
if (curlEnginePool.empty) {
addLogEntry("CURL ENGINE POOL EMPTY - CONSTRUCTING A NEW CURL ENGINE INSTANCE" , ["debug"]);
return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance
} else {
CurlEngine curlEngine = curlEnginePool[$ - 1];
curlEnginePool.popBack();
// Is this engine stopped?
if (curlEngine.http.isStopped) {
// return a new curl engine as a stopped one cannot be used
addLogEntry("CURL ENGINE WAS STOPPED - CONSTRUCTING A NEW CURL ENGINE INSTANCE" , ["debug"]);
return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance
} else {
// return an existing curl engine
addLogEntry("CURL ENGINE WAS VALID - RETURNED AN EXISTING CURL ENGINE INSTANCE" , ["debug"]);
addLogEntry("CURL ENGINE ID: " ~ curlEngine.internalThreadId, ["debug"]);
return curlEngine;
}
}
}
}
static void releaseAllCurlInstances() {
synchronized (CurlEngine.classinfo) {
// What is the current pool size
addLogEntry("CURL ENGINES TO RELEASE: " ~ to!string(curlEnginePool.length), ["debug"]);
// Safely iterate and clean up each CurlEngine instance
foreach (curlEngineInstance; curlEnginePool) {
try {
curlEngineInstance.cleanup(); // Cleanup instance by resetting values
curlEngineInstance.shutdownCurlHTTPInstance(); // Assume proper cleanup of any resources used by HTTP
} catch (Exception e) {
// Log the error or handle it appropriately
// e.g., writeln("Error during cleanup/shutdown: ", e.toString());
}
// It's safe to destroy the object here assuming no other references exist
object.destroy(curlEngineInstance); // Destroy, then set to null
curlEngineInstance = null;
}
// Clear the array after all instances have been handled
curlEnginePool.length = 0; // More explicit than curlEnginePool = [];
}
}
// Destroy all curl instances
static void destroyAllCurlInstances() {
addLogEntry("DESTROY ALL CURL ENGINES", ["debug"]);
// Release all 'curl' instances
releaseAllCurlInstances();
}
// We are releasing a curl instance back to the pool
void releaseEngine() {
addLogEntry("CurlEngine releaseEngine() CALLED", ["debug"]);
addLogEntry("CURRENT CURL ENGINE AVAILABLE POOL SIZE: " ~ to!string(curlEnginePool.length), ["debug"]);
cleanup();
synchronized (CurlEngine.classinfo) {
curlEnginePool ~= this;
addLogEntry("CURL ENGINE POOL SIZE AFTER RELEASE BACK TO POOL: " ~ to!string(curlEnginePool.length), ["debug"]);
}
}
void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=true) {
// Setting this to false ensures that when we close the curl instance, any open sockets are closed - which we need to do when running
// multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly
@ -287,12 +349,24 @@ class CurlEngine {
}
}
void setResponseHolder(CurlResponse response) {
if (response is null) {
// Create a response instance if it doesn't already exist
if (this.response is null)
this.response = new CurlResponse();
} else {
this.response = response;
}
}
void addRequestHeader(const(char)[] name, const(char)[] value) {
setResponseHolder(null);
http.addRequestHeader(name, value);
response.addRequestHeader(name, value);
}
void connect(HTTP.Method method, const(char)[] url) {
setResponseHolder(null);
if (!keepAlive)
addRequestHeader("Connection", "close");
http.method = method;
@ -301,6 +375,7 @@ class CurlEngine {
}
void setContent(const(char)[] contentType, const(char)[] sendData) {
setResponseHolder(null);
addRequestHeader("Content-Type", contentType);
if (sendData) {
http.contentLength = sendData.length;
@ -316,16 +391,33 @@ class CurlEngine {
}
}
void setFile(File* file, ulong offsetSize) {
void setFile(string filepath, string contentRange, ulong offset, ulong offsetSize) {
setResponseHolder(null);
// open file as read-only in binary mode
uploadFile = File(filepath, "rb");
if (contentRange.empty) {
offsetSize = uploadFile.size();
} else {
addRequestHeader("Content-Range", contentRange);
uploadFile.seek(offset);
}
// Setup progress bar to display
http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
return 0;
};
addRequestHeader("Content-Type", "application/octet-stream");
http.onSend = data => file.rawRead(data).length;
http.onSend = data => uploadFile.rawRead(data).length;
http.contentLength = offsetSize;
}
CurlResponse execute() {
scope(exit) {
cleanUp();
cleanup();
}
setResponseHolder(null);
http.onReceive = (ubyte[] data) {
response.content ~= data;
// HTTP Server Response Code Debugging if --https-debug is being used
@ -334,20 +426,17 @@ class CurlEngine {
};
http.perform();
response.update(&http);
return response.dup;
return response;
}
CurlResponse download(string originalFilename, string downloadFilename) {
// Threshold for displaying download bar
long thresholdFileSize = 4 * 2^^20; // 4 MiB
CurlResponse response = new CurlResponse();
setResponseHolder(null);
// open downloadFilename as write in binary mode
auto file = File(downloadFilename, "wb");
// function scopes
scope(exit) {
cleanUp();
cleanup();
if (file.isOpen()){
// close open file
file.close();
@ -368,27 +457,44 @@ class CurlEngine {
return response;
}
void cleanUp() {
void cleanup() {
// Reset any values to defaults, freeing any set objects
http.clearRequestHeaders();
http.onSend = null;
http.onReceive = null;
http.onReceiveHeader = null;
http.onReceiveStatusLine = null;
http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
return 0;
};
http.contentLength = 0;
response.reset();
addLogEntry("CurlEngine cleanup() CALLED", ["debug"]);
// Is the instance is stopped?
if (!http.isStopped) {
// A stopped instance is not usable, these cannot be reset
http.clearRequestHeaders();
http.onSend = null;
http.onReceive = null;
http.onReceiveHeader = null;
http.onReceiveStatusLine = null;
http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) {
return 0;
};
http.contentLength = 0;
http.flushCookieJar();
http.clearSessionCookies();
http.clearAllCookies();
}
// set the response to null
response = null;
// close file if open
if (uploadFile.isOpen()){
// close open file
uploadFile.close();
}
}
void shutdown() {
void shutdownCurlHTTPInstance() {
// Shut down the curl instance & close any open sockets
http.shutdown();
}
void setDisableSSLVerifyPeer() {
addLogEntry("CAUTION: Switching off CurlOption.ssl_verifypeer ... this makes the application insecure.", ["debug"]);
http.handle.set(CurlOption.ssl_verifypeer, 0);
addLogEntry("HTTP SHUTDOWN CALLED ..." , ["debug"]);
// Is the instance is stopped?
if (!http.isStopped) {
http.shutdown();
}
}
}

View file

@ -52,7 +52,14 @@ class LogBuffer {
flushThread.isDaemon(true);
flushThread.start();
}
// The destructor should only clean up resources owned directly by this instance
~this() {
object.destroy(bufferLock);
object.destroy(condReady);
object.destroy(flushThread);
}
void shutdown() {
synchronized(bufferLock) {
if (!isRunning) return; // Prevent multiple shutdowns
@ -62,6 +69,7 @@ class LogBuffer {
flushThread.join(); // Wait for the flush thread to finish
flush(); // Perform a final flush to ensure all data is processed
}
shared void logThisMessage(string message, string[] levels = ["info"]) {
// Generate the timestamp for this log entry
auto timeStamp = leftJustify(Clock.currTime().toString(), 28, '0');
@ -100,7 +108,9 @@ class LogBuffer {
// Use dnotify's functionality for GUI notifications, if GUI notifications is enabled
version(Notifications) {
try {
auto n = new Notification("Log Notification", message, "IGNORED");
auto n = new Notification("OneDrive Client", message, "IGNORED");
// Show notification for 10 seconds
n.timeout = 10;
n.show();
} catch (NotificationError e) {
sendGUINotification = false;

View file

@ -4,6 +4,7 @@ module main;
// What does this module require to function?
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit;
import core.stdc.signal;
import core.sys.posix.signal;
import core.memory;
import core.time;
import core.thread;
@ -44,7 +45,19 @@ ItemDatabase itemDB;
ClientSideFiltering selectiveSync;
Monitor filesystemMonitor;
// Class variables
// Flag for performing a synchronised shutdown
bool shutdownInProgress = false;
// Flag if a --dry-run is being performed, as, on shutdown, once config is destroyed, we have no reference here
bool dryRun = false;
// Configure the runtime database file path so that it is available to us on shutdown so objects can be destroyed and removed if required
// - Typically this will be the default, but in a --dry-run scenario, we use a separate database file
string runtimeDatabaseFile = "";
int main(string[] cliArgs) {
// Setup CTRL-C handler
setupSignalHandler();
// Application Start Time - used during monitor loop to detail how long it has been running for
auto applicationStartTime = Clock.currTime();
// Disable buffering on stdout - this is needed so that when we are using plain write() it will go to the terminal without flushing
@ -63,8 +76,7 @@ int main(string[] cliArgs) {
// What is the runtime syncronisation directory that will be used
// Typically this will be '~/OneDrive' .. however tilde expansion is unreliable
string runtimeSyncDirectory = "";
// Configure the runtime database file path. Typically this will be the default, but in a --dry-run scenario, we use a separate database file
string runtimeDatabaseFile = "";
// Verbosity Logging Count - this defines if verbose or debug logging is being used
long verbosityCount = 0;
// Application Logging Level
@ -86,15 +98,15 @@ int main(string[] cliArgs) {
scope(exit) {
// Detail what scope was called
addLogEntry("Exit scope was called", ["debug"]);
// Perform exit tasks
performStandardExitProcess("exitScope");
// Perform synchronised exit
performSynchronisedExitProcess("exitScope");
}
scope(failure) {
// Detail what scope was called
addLogEntry("Failure scope was called", ["debug"]);
// Perform exit tasks
performStandardExitProcess("failureScope");
// Perform synchronised exit
performSynchronisedExitProcess("failureScope");
}
// Read in application options as passed in
@ -140,23 +152,6 @@ int main(string[] cliArgs) {
// If we need to enable logging to a file, we can only do this once we know the application configuration which is done slightly later on
initialiseLogging(verboseLogging, debugLogging);
/**
// most used
addLogEntry("Basic 'info' message", ["info"]); .... or just use addLogEntry("Basic 'info' message");
addLogEntry("Basic 'verbose' message", ["verbose"]);
addLogEntry("Basic 'debug' message", ["debug"]);
// GUI notify only
addLogEntry("Basic 'notify' ONLY message and displayed in GUI if notifications are enabled", ["notify"]);
// info and notify
addLogEntry("Basic 'info and notify' message and displayed in GUI if notifications are enabled", ["info", "notify"]);
// log file only
addLogEntry("Information sent to the log file only, and only if logging to a file is enabled", ["logFileOnly"]);
// Console only (session based upload|download)
addLogEntry("Basic 'Console only with new line' message", ["consoleOnly"]);
// Console only with no new line
addLogEntry("Basic 'Console only with no new line' message", ["consoleOnlyNoNewLine"]);
**/
// Log application start time, log line has start time
addLogEntry("Application started", ["debug"]);
@ -188,6 +183,9 @@ int main(string[] cliArgs) {
// Update the current runtime application configuration (default or 'config' fileread-in options) from any passed in command line arguments
appConfig.updateFromArgs(cliArgs);
// Configure dryRun so that this can be used here & during shutdown
dryRun = appConfig.getValueBool("dry_run");
// As early as possible, now re-configure the logging class, given that we have read in any applicable 'config' file and updated the application running config from CLI input:
// - Enable logging to a file if this is required
// - Disable GUI notifications if this has been configured
@ -264,9 +262,9 @@ int main(string[] cliArgs) {
// Check for --dry-run operation or a 'no-sync' operation where the 'dry-run' DB copy should be used
// If this has been requested, we need to ensure that all actions are performed against the dry-run database copy, and,
// no actual action takes place - such as deleting files if deleted online, moving files if moved online or local, downloading new & changed files, uploading new & changed files
if ((appConfig.getValueBool("dry_run")) || (appConfig.hasNoSyncOperationBeenRequested())) {
if (dryRun || (appConfig.hasNoSyncOperationBeenRequested())) {
if (appConfig.getValueBool("dry_run")) {
if (dryRun) {
// This is a --dry-run operation
addLogEntry("DRY-RUN Configured. Output below shows what 'would' have occurred.");
}
@ -279,26 +277,29 @@ int main(string[] cliArgs) {
// In a --dry-run --resync scenario, we should not copy the existing database file
if (!appConfig.getValueBool("resync")) {
// Copy the existing DB file to the dry-run copy
if (appConfig.getValueBool("dry_run")) {
if (dryRun) {
addLogEntry("DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations");
}
copy(appConfig.databaseFilePath,appConfig.databaseFilePathDryRun);
} else {
// No database copy due to --resync
if (appConfig.getValueBool("dry_run")) {
if (dryRun) {
addLogEntry("DRY-RUN: No database copy created for --dry-run due to --resync also being used");
}
}
}
// update runtimeDatabaseFile now that we are using the dry run path
runtimeDatabaseFile = appConfig.databaseFilePathDryRun;
} else {
// Cleanup any existing dry-run elements ... these should never be left hanging around
cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun);
}
// Handle --logout as separate item, do not 'resync' on a --logout
if (appConfig.getValueBool("logout")) {
addLogEntry("--logout requested", ["debug"]);
addLogEntry("Deleting the saved authentication status ...");
if (!appConfig.getValueBool("dry_run")) {
if (!dryRun) {
safeRemove(appConfig.refreshTokenFilePath);
} else {
// --dry-run scenario ... technically we should not be making any local file changes .......
@ -312,7 +313,7 @@ int main(string[] cliArgs) {
if (appConfig.getValueBool("reauth")) {
addLogEntry("--reauth requested", ["debug"]);
addLogEntry("Deleting the saved authentication status ... re-authentication requested");
if (!appConfig.getValueBool("dry_run")) {
if (!dryRun) {
safeRemove(appConfig.refreshTokenFilePath);
} else {
// --dry-run scenario ... technically we should not be making any local file changes .......
@ -433,9 +434,9 @@ int main(string[] cliArgs) {
// Flag that we were able to initalise the API in the application config
oneDriveApiInstance.debugOutputConfiguredAPIItems();
oneDriveApiInstance.shutdown();
oneDriveApiInstance.releaseCurlEngine();
object.destroy(oneDriveApiInstance);
oneDriveApiInstance = null;
// Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations
addLogEntry("Opening the item database ...", ["verbose"]);
@ -824,10 +825,6 @@ int main(string[] cliArgs) {
}
};
// Handle SIGINT and SIGTERM
signal(SIGINT, &exitHandler);
signal(SIGTERM, &exitHandler);
// Initialise the local filesystem monitor class using inotify to monitor for local filesystem changes
// If we are in a --download-only method of operation, we do not enable local filesystem monitoring
if (!appConfig.getValueBool("download_only")) {
@ -997,13 +994,17 @@ int main(string[] cliArgs) {
addLogEntry("End Monitor Loop Time: " ~ to!string(endFunctionProcessingTime), ["debug"]);
addLogEntry("Elapsed Monitor Loop Processing Time: " ~ to!string((endFunctionProcessingTime - startFunctionProcessingTime)), ["debug"]);
// Display memory details before cleanup
// Release all the curl instances used during this loop
// New curl instances will be established on next loop
CurlEngine.releaseAllCurlInstances();
// Display memory details before garbage collection
if (displayMemoryUsage) displayMemoryUsagePreGC();
// Perform Garbage Cleanup
// Perform Garbage Collection
GC.collect();
// Return free memory to the OS
GC.minimize();
// Display memory details after cleanup
// Display memory details after garbage collection
if (displayMemoryUsage) displayMemoryUsagePostGC();
// Log that this loop is complete
@ -1111,78 +1112,6 @@ int main(string[] cliArgs) {
}
}
// Legacy single-pass application shutdown routine.
//
// Tears down every long-lived global object (webhook, sync engine, selective
// sync filters, app config, filesystem monitor, database, cached curl sockets)
// in a fixed order, then either nulls all globals (failure path) or finalises
// logging (normal path).
//
// Params:
//   scopeCaller = optional tag identifying who triggered shutdown; the special
//                 value "failureScope" selects the failure path below.
// NOTE(review): not guarded against concurrent invocation - assumes a single
// caller at a time.
void performStandardExitProcess(string scopeCaller = null) {
	// Who called this function
	if (!scopeCaller.empty) {
		addLogEntry("Running performStandardExitProcess due to: " ~ scopeCaller, ["debug"]);
	}
	// Shutdown the OneDrive Webhook instance
	if (oneDriveWebhook !is null) {
		oneDriveWebhook.stop();
		object.destroy(oneDriveWebhook);
	}
	// Shutdown the sync engine
	if (syncEngineInstance !is null) {
		addLogEntry("Shutdown Sync Engine instance", ["debug"]);
		object.destroy(syncEngineInstance);
	}
	// Shutdown the client side filtering objects
	if (selectiveSync !is null) {
		addLogEntry("Shutdown Client Side Filtering instance", ["debug"]);
		selectiveSync.shutdown();
		object.destroy(selectiveSync);
	}
	// Shutdown the application configuration objects
	if (appConfig !is null) {
		addLogEntry("Shutdown Application Configuration instance", ["debug"]);
		// Cleanup any existing dry-run elements ... these should never be left hanging around
		cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun);
		object.destroy(appConfig);
	}
	// Shutdown any local filesystem monitoring
	if (filesystemMonitor !is null) {
		addLogEntry("Shutdown Filesystem Monitoring instance", ["debug"]);
		filesystemMonitor.shutdown();
		object.destroy(filesystemMonitor);
	}
	// Shutdown the database
	if (itemDB !is null) {
		addLogEntry("Shutdown Database instance", ["debug"]);
		// Make sure the .wal file is incorporated into the main db before we exit
		if (itemDB.isDatabaseInitialised()) {
			itemDB.performVacuum();
		}
		// Destroy the DB object even when the DB was never initialised
		object.destroy(itemDB);
	}
	// Shutdown cached sockets
	CurlEngine.releaseAll();
	// Set all objects to null
	if (scopeCaller == "failureScope") {
		// Set these to be null due to failure scope - prevent 'ERROR: Unable to perform a database vacuum: out of memory' when the exit scope is then called
		addLogEntry("Setting ALL Class Objects to null due to failure scope", ["debug"]);
		itemDB = null;
		appConfig = null;
		oneDriveWebhook = null;
		selectiveSync = null;
		syncEngineInstance = null;
	} else {
		// Normal exit path: flush and tear down the shared logging machinery last
		addLogEntry("Waiting for all internal threads to complete before exiting application", ["verbose"]);
		addLogEntry("Application exit", ["debug"]);
		addLogEntry("#######################################################################################################################################", ["logFileOnly"]);
		// Destroy the shared logging buffer
		(cast() logBuffer).shutdown();
		object.destroy(logBuffer);
	}
}
void oneDriveWebhookCallback() {
// If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check
if (!appConfig.getValueBool("download_only")) {
@ -1352,7 +1281,7 @@ void processResyncDatabaseRemoval(string databaseFilePathToRemove) {
destroy(itemDB);
// delete application sync state
addLogEntry("Deleting the saved application sync status ...");
if (!appConfig.getValueBool("dry_run")) {
if (!dryRun) {
safeRemove(databaseFilePathToRemove);
} else {
// --dry-run scenario ... technically we should not be making any local file changes .......
@ -1364,7 +1293,7 @@ void cleanupDryRunDatabaseFiles(string dryRunDatabaseFile) {
// Temp variables
string dryRunShmFile = dryRunDatabaseFile ~ "-shm";
string dryRunWalFile = dryRunDatabaseFile ~ "-wal";
// If the dry run database exists, clean this up
if (exists(dryRunDatabaseFile)) {
// remove the existing file
@ -1406,23 +1335,159 @@ auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) {
return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t;
}
// Catch CTRL-C
// Configure the signal handler to catch SIGINT (CTRL-C) and SIGTERM (kill)
// Install the process signal handlers for SIGINT (CTRL-C) and SIGTERM (kill).
// Both signals route to exitHandler; installation failure is fatal.
void setupSignalHandler() {
	sigaction_t sa;
	sa.sa_handler = &exitHandler;            // Direct function pointer assignment
	sa.sa_flags = SA_RESETHAND | SA_NODEFER; // Reset + no-defer so reentrant signals are handled
	sigemptyset(&sa.sa_mask);                // Start from an empty signal mask

	// Register one signal; abort the process if registration fails
	void registerHandler(int signalNumber, string signalName) {
		if (sigaction(signalNumber, &sa, null) != 0) {
			writeln("FATAL: Failed to install " ~ signalName ~ " handler");
			exit(-1);
		}
	}

	registerHandler(SIGINT, "SIGINT");
	registerHandler(SIGTERM, "SIGTERM");
}
// Catch SIGINT (CTRL-C) and SIGTERM (kill), handle rapid repeat presses.
//
// The visible text contained an interleaved merge of two handler versions
// (duplicate log lines, calls to both performStandardExitProcess and
// performSynchronisedExitProcess, and a duplicated catch/exit tail); this is
// the coherent reconstruction of the newer version.
//
// Params:
//   value = the signal number received; also used as the process exit code.
extern(C) nothrow @nogc @system void exitHandler(int value) {
	// Ignore subsequent presses once a shutdown is already underway
	if (shutdownInProgress) {
		return;
	}
	shutdownInProgress = true;

	try {
		// assumeNoGC wraps GC-using cleanup so it is callable from this @nogc handler
		assumeNoGC ( () {
			addLogEntry("\nReceived termination signal, initiating cleanup");
			// Wait for all parallel jobs that depend on the database to complete
			addLogEntry("Waiting for any existing upload|download process to complete");
			taskPool.finish(true);
			// Stop the sync engine before tearing everything else down
			syncEngineInstance.shutdown();
			// Perform the shutdown process
			performSynchronisedExitProcess("exitHandler");
		})();
	} catch(Exception e) {
		// Any output here would cause a GC allocation:
		// - Error: `@nogc` function cannot call non-@nogc `writeln`
		// - Error: cannot use operator `~` in a `@nogc` function
		// so the exception is intentionally swallowed
	}

	// Exit the process with the provided exit code
	exit(value);
}
// Handle application exit.
//
// Synchronised so that concurrent shutdown triggers (signal handler, normal
// exit path) serialise rather than racing each other. Tears down services in
// dependency order, then finalises logging and compacts the GC heap.
//
// Params:
//   scopeCaller = optional tag identifying who triggered the shutdown (debug log only).
void performSynchronisedExitProcess(string scopeCaller = null) {
	synchronized {
		// Logging the caller of the shutdown procedure
		if (!scopeCaller.empty) {
			addLogEntry("performSynchronisedExitProcess called by: " ~ scopeCaller, ["debug"]);
		}
		// Perform cleanup and shutdown of various services and resources
		try {
			// Shutdown the OneDrive Webhook instance
			shutdownOneDriveWebhook();
			// Shutdown the client side filtering objects
			shutdownSelectiveSync();
			// Destroy all 'curl' instances
			destroyCurlInstances();
			// Shutdown the sync engine
			shutdownSyncEngine();
			// Shutdown any local filesystem monitoring
			shutdownFilesystemMonitor();
			// Shutdown the database
			shutdownDatabase();
			// Shutdown the application configuration objects
			shutdownAppConfig();
		} catch (Exception e) {
			// Fixed: message previously referenced the old routine name 'performStandardExitProcess'
			addLogEntry("Error during performSynchronisedExitProcess: " ~ e.toString(), ["error"]);
		}
		// Finalise all logging and destroy log buffer
		shutdownApplicationLogging();
		// Perform Garbage Collection
		GC.collect();
		// Return free memory to the OS
		GC.minimize();
	}
}
// Stop and dispose of the OneDrive Webhook instance, if one exists.
void shutdownOneDriveWebhook() {
	// Nothing to do when no webhook was ever created
	if (oneDriveWebhook is null) {
		return;
	}
	addLogEntry("Shutdown OneDrive Webhook instance", ["debug"]);
	// Stop the listener before releasing the object
	oneDriveWebhook.stop();
	object.destroy(oneDriveWebhook);
	oneDriveWebhook = null;
}
// Stop and dispose of the local filesystem (inotify) monitor, if one exists.
void shutdownFilesystemMonitor() {
	// Nothing to do when filesystem monitoring was never started
	if (filesystemMonitor is null) {
		return;
	}
	addLogEntry("Shutdown Filesystem Monitoring instance", ["debug"]);
	// Stop watching before releasing the object
	filesystemMonitor.shutdown();
	object.destroy(filesystemMonitor);
	filesystemMonitor = null;
}
// Shut down and dispose of the client-side filtering (selective sync) objects, if present.
void shutdownSelectiveSync() {
	// Nothing to do when no filtering instance exists
	if (selectiveSync is null) {
		return;
	}
	addLogEntry("Shutdown Client Side Filtering instance", ["debug"]);
	selectiveSync.shutdown();
	object.destroy(selectiveSync);
	selectiveSync = null;
}
// Shut down and dispose of the sync engine, if one exists.
void shutdownSyncEngine() {
	// Nothing to do when no sync engine was created
	if (syncEngineInstance is null) {
		return;
	}
	addLogEntry("Shutdown Sync Engine instance", ["debug"]);
	// Make sure any running thread completes before the object is destroyed
	syncEngineInstance.shutdown();
	object.destroy(syncEngineInstance);
	syncEngineInstance = null;
}
// Shut down and dispose of the item database, if one exists.
//
// Fix: the previous guard (`itemDB !is null && itemDB.isDatabaseInitialised()`)
// skipped destroy/null entirely when the DB object existed but was never
// initialised, leaking the instance and leaving a stale global. Vacuuming is
// now conditional; destruction is not (matching the legacy shutdown routine,
// which destroyed unconditionally and vacuumed only when initialised).
void shutdownDatabase() {
	if (itemDB !is null) {
		addLogEntry("Shutdown Database instance", ["debug"]);
		if (itemDB.isDatabaseInitialised()) {
			// Make sure the .wal file is incorporated into the main db before we exit
			itemDB.performVacuum();
		}
		object.destroy(itemDB);
		itemDB = null;
	}
}
// Dispose of the application configuration instance, if one exists.
// When running with --dry-run, removes the dry-run database artefacts first.
void shutdownAppConfig() {
	// Nothing to do when no application configuration exists
	if (appConfig is null) {
		return;
	}
	addLogEntry("Shutdown Application Configuration instance", ["debug"]);
	if (dryRun) {
		// We were running with --dry-run: clean up the applicable database
		cleanupDryRunDatabaseFiles(runtimeDatabaseFile);
	}
	object.destroy(appConfig);
	appConfig = null;
}
// Destroy every cached curl engine instance via the CurlEngine class helper.
// Thin wrapper kept so the exit sequence reads uniformly.
void destroyCurlInstances() {
	CurlEngine.destroyAllCurlInstances();
}
// Finalise application logging: emit the closing log entries, then shut down
// and destroy the shared logging buffer. Since logBuffer is destroyed here,
// presumably no addLogEntry() call is safe after this returns - callers should
// invoke this last.
void shutdownApplicationLogging() {
	// Log that we are exiting
	addLogEntry("Application is exiting.", ["debug"]);
	addLogEntry("#######################################################################################################################################", ["logFileOnly"]);
	// Destroy the shared logging buffer
	(cast() logBuffer).shutdown();
	object.destroy(logBuffer);
}

View file

@ -140,8 +140,7 @@ class MonitorBackgroundWorker {
}
}
void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
{
void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) {
try {
worker.watch(callerTid);
} catch (OwnerTerminated error) {
@ -282,6 +281,11 @@ final class Monitor {
this.selectiveSync = selectiveSync;
}
// The destructor should only clean up resources owned directly by this instance
~this() {
object.destroy(worker);
}
// Initialise the monitor class
void initialise() {
// Configure the variables

File diff suppressed because it is too large Load diff

1565
src/sync.d

File diff suppressed because it is too large Load diff

View file

@ -42,6 +42,7 @@ import curlEngine;
// module variables
shared string deviceName;
ulong previousRSS;
static this() {
deviceName = Socket.hostName;
@ -201,42 +202,58 @@ Regex!char wild2regex(const(char)[] pattern) {
return regex(str, "i");
}
// Test Internet access to Microsoft OneDrive using a simple HTTP HEAD request.
//
// The visible text contained an interleaved merge of the old CurlEngine-based
// implementation and the new direct std.net.curl HTTP implementation
// (duplicated log lines, two competing engines, an unreachable tail); this is
// the coherent reconstruction of the newer HTTP-based version.
//
// Params:
//   appConfig = application configuration, used for timeout and IP-protocol settings
// Returns: true when the login service answered with a 2xx/3xx status, false otherwise
bool testInternetReachability(ApplicationConfig appConfig) {
	auto http = HTTP();
	http.url = "https://login.microsoftonline.com";
	// Configure timeouts based on application configuration
	http.dnsTimeout = dur!"seconds"(appConfig.getValueLong("dns_timeout"));
	http.connectTimeout = dur!"seconds"(appConfig.getValueLong("connect_timeout"));
	http.dataTimeout = dur!"seconds"(appConfig.getValueLong("data_timeout"));
	http.operationTimeout = dur!"seconds"(appConfig.getValueLong("operation_timeout"));
	// Set IP protocol version
	http.handle.set(CurlOption.ipresolve, appConfig.getValueLong("ip_protocol_version"));
	// Set HTTP method to HEAD for minimal data transfer
	http.method = HTTP.Method.head;
	// Execute the request and handle exceptions
	try {
		addLogEntry("Attempting to contact Microsoft OneDrive Login Service");
		http.perform();
		// Check response for HTTP status code
		if (http.statusLine.code >= 200 && http.statusLine.code < 400) {
			addLogEntry("Successfully reached Microsoft OneDrive Login Service");
		} else {
			addLogEntry("Failed to reach Microsoft OneDrive Login Service. HTTP status code: " ~ to!string(http.statusLine.code));
			throw new Exception("HTTP Request Failed with Status Code: " ~ to!string(http.statusLine.code));
		}
		http.shutdown();
		object.destroy(http);
		return true;
	} catch (SocketException e) {
		addLogEntry("Cannot connect to Microsoft OneDrive Service - Socket Issue: " ~ e.msg);
		displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
		http.shutdown();
		object.destroy(http);
		return false;
	} catch (CurlException e) {
		addLogEntry("Cannot connect to Microsoft OneDrive Service - Network Connection Issue: " ~ e.msg);
		displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
		http.shutdown();
		object.destroy(http);
		return false;
	} catch (Exception e) {
		addLogEntry("Unexpected error occurred: " ~ e.toString());
		displayOneDriveErrorMessage(e.toString(), getFunctionName!({}));
		http.shutdown();
		object.destroy(http);
		return false;
	}
}
// Retry Internet access test to Microsoft OneDrive
@ -489,7 +506,6 @@ bool isValidUTF16(string path) {
return true;
}
// Does the path contain any HTML URL encoded items (e.g., '%20' for space)
bool containsURLEncodedItems(string path) {
// Check for null or empty string
@ -636,7 +652,7 @@ void displayFileSystemErrorMessage(string message, string callingFunction) {
addLogEntry(" Error Message: " ~ errorMessage);
// Log the calling function
addLogEntry(" Calling Function: " ~ callingFunction, ["verbose"]);
addLogEntry(" Calling Function: " ~ callingFunction);
try {
// Safely check for disk space
@ -659,32 +675,75 @@ void displayPosixErrorMessage(string message) {
addLogEntry(" Error Message: " ~ message);
}
// Display the Error Message
// Log a general exception: its class name, message, calling function and line number.
//
// Params:
//   e               = the exception to report
//   callingFunction = defaults to the caller's fully-qualified name via __FUNCTION__
//   lineno          = defaults to the call-site line number via __LINE__
void displayGeneralErrorMessage(Exception e, string callingFunction=__FUNCTION__, int lineno=__LINE__) {
	addLogEntry(); // used rather than writeln
	addLogEntry("ERROR: Encounter " ~ e.classinfo.name ~ ":");
	addLogEntry(" Error Message: " ~ e.msg);
	addLogEntry(" Calling Function: " ~ callingFunction);
	addLogEntry(" Line number: " ~ to!string(lineno));
}
// Get the function name that is being called to assist with identifying where an error is being generated
// Return the name of the scope enclosing the passed alias, formatted as "name()\n".
// Used to identify where an error was generated.
// NOTE(review): the trailing "()\n" appears deliberate for log formatting - confirm callers expect the newline.
string getFunctionName(alias func)() {
	return __traits(identifier, __traits(parent, func)) ~ "()\n";
}
// Fetch the content of a URL via HTTP GET and parse it as JSON.
//
// Fix: the body contained both a scope(exit) cleanup and an explicit
// shutdown/destroy at the end, so the HTTP handle was cleaned up twice on
// success - and with the explicit-only path the handle leaked whenever
// perform() or parseJSON() threw. The scope(exit) guard alone covers both
// success and exception paths exactly once.
//
// Params:
//   url = the URL to fetch
// Returns: the parsed JSON response body
// Throws: CurlException on network failure, JSONException on malformed JSON
JSONValue fetchOnlineURLContent(string url) {
	// Function variables
	char[] content;
	JSONValue onlineContent;
	// Setup HTTP request
	HTTP http = HTTP();
	// Single cleanup point: runs on both normal return and exception
	scope(exit) {
		http.shutdown();
		object.destroy(http);
	}
	// Configure the URL to access
	http.url = url;
	// HTTP the connection method
	http.method = HTTP.Method.get;
	// Data receive handler
	http.onReceive = (ubyte[] data) {
		content ~= data; // Append data as it's received
		return data.length;
	};
	// Perform HTTP request
	http.perform();
	// Parse Content
	onlineContent = parseJSON(to!string(content));
	// Return onlineResponse
	return onlineContent;
}
// Get the latest release version from GitHub
JSONValue getLatestReleaseDetails() {
// Import curl just for this function
import std.net.curl;
char[] content;
JSONValue githubLatest;
JSONValue versionDetails;
string latestTag;
string publishedDate;
// Query GitHub for the 'latest' release details
try {
content = get("https://api.github.com/repos/abraunegg/onedrive/releases/latest");
githubLatest = content.parseJSON();
try {
githubLatest = fetchOnlineURLContent("https://api.github.com/repos/abraunegg/onedrive/releases/latest");
} catch (CurlException e) {
addLogEntry("CurlException: Unable to query GitHub for latest release - " ~ e.msg, ["debug"]);
} catch (JSONException e) {
addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]);
}
// githubLatest has to be a valid JSON object
if (githubLatest.type() == JSONType.object){
// use the returned tag_name
@ -726,9 +785,6 @@ JSONValue getLatestReleaseDetails() {
// Get the release details from the 'current' running version
JSONValue getCurrentVersionDetails(string thisVersion) {
// Import curl just for this function
import std.net.curl;
char[] content;
JSONValue githubDetails;
JSONValue versionDetails;
string versionTag = "v" ~ thisVersion;
@ -736,9 +792,8 @@ JSONValue getCurrentVersionDetails(string thisVersion) {
// Query GitHub for the release details to match the running version
try {
content = get("https://api.github.com/repos/abraunegg/onedrive/releases");
githubDetails = content.parseJSON();
} catch (CurlException e) {
githubDetails = fetchOnlineURLContent("https://api.github.com/repos/abraunegg/onedrive/releases");
} catch (CurlException e) {
addLogEntry("CurlException: Unable to query GitHub for release details - " ~ e.msg, ["debug"]);
return parseJSON(`{"Error": "CurlException", "message": "` ~ e.msg ~ `"}`);
} catch (JSONException e) {
@ -1023,29 +1078,51 @@ string generateAlphanumericString(size_t length = 16) {
return to!string(randomString);
}
// Display internal memory stats pre garbage collection.
//
// Fix: the body contained both the old writeln-based header lines and the new
// addLogEntry-based ones (diff-merge residue), so the banner would have been
// emitted twice through two different channels; only the logging-subsystem
// version is kept.
void displayMemoryUsagePreGC() {
	// Display memory usage via the logging subsystem (captured in log files too)
	addLogEntry();
	addLogEntry("Memory Usage PRE Garbage Collection (KB)");
	addLogEntry("-----------------------------------------------------");
	writeMemoryStats();
	addLogEntry();
}
// Display internal memory stats post garbage collection, plus the process
// Resident Set Size (RSS - the actual memory in RAM used by the process).
//
// Fix: the body contained both the old writeln-based header lines and the new
// addLogEntry-based ones (diff-merge residue); only the logging-subsystem
// version is kept. Tracks RSS across calls via the module-level previousRSS
// so growth/shrinkage between loops is visible.
void displayMemoryUsagePostGC() {
	// Display memory usage via the logging subsystem
	addLogEntry();
	addLogEntry("Memory Usage POST Garbage Collection (KB)");
	addLogEntry("-----------------------------------------------------");
	writeMemoryStats();
	// Query the actual Resident Set Size (RSS) for the PID
	pid_t pid = getCurrentPID();
	ulong rss = getRSS(pid);
	addLogEntry("current Resident Set Size (RSS) = " ~ to!string(rss)); // actual memory in RAM used by the process - this needs to remain stable, already in KB
	// Is there a previous value to compare against?
	if (previousRSS != 0) {
		addLogEntry("previous Resident Set Size (RSS) = " ~ to!string(previousRSS));
		// Increase or decrease in RSS
		if (rss > previousRSS) {
			addLogEntry("difference in Resident Set Size (RSS) = +" ~ to!string((rss - previousRSS)));
		} else {
			addLogEntry("difference in Resident Set Size (RSS) = -" ~ to!string((previousRSS - rss)));
		}
	}
	// Update previous RSS with new value
	previousRSS = rss;
	// Closeout
	addLogEntry();
}
// Write internal GC memory stats (KB).
//
// Fix: the body contained both the old writeln statements and the new
// addLogEntry statements for the same three values (diff-merge residue), so
// every stat would have been emitted twice; only the logging-subsystem
// version is kept.
void writeMemoryStats() {
	addLogEntry("current memory usedSize = " ~ to!string((GC.stats.usedSize/1024))); // number of used bytes on the GC heap (might only get updated after a collection)
	addLogEntry("current memory freeSize = " ~ to!string((GC.stats.freeSize/1024))); // number of free bytes on the GC heap (might only get updated after a collection)
	addLogEntry("current memory allocatedInCurrentThread = " ~ to!string((GC.stats.allocatedInCurrentThread/1024))); // number of bytes allocated for current thread since program start
}
// Return the username of the UID running the 'onedrive' process
@ -1116,9 +1193,56 @@ int calc_eta(size_t counter, size_t iterations, ulong start_time) {
}
}
// Force Exit
// Force the application to terminate with a failure status.
void forceExit() {
	// Short pause so any pending log output can flush before termination
	Thread.sleep(dur!("msecs")(500));
	// Terminate immediately with a failure exit code
	exit(EXIT_FAILURE);
}
}
// Get the current PID of the application
// Return the PID of the currently running process, read from /proc/self/stat.
// Returns 0 when the stat file cannot be read.
pid_t getCurrentPID() {
	// '/proc/self' is a symlink to this process's own proc directory
	immutable string statPath = "/proc/self/stat";
	string statContent;
	try {
		statContent = readText(statPath);
	} catch (Exception e) {
		writeln("Failed to read stat file: ", e.msg);
		return 0;
	}
	// The PID is the first whitespace-separated field of the stat file
	return to!pid_t(split(statContent)[0]);
}
// Access the Resident Set Size (RSS) based on the PID of the running application
// Return the Resident Set Size (RSS) in kilobytes for the given PID, read
// from /proc/<pid>/statm.
//
// Fix: the file read and field-count were guarded, but to!ulong on the RSS
// field was not - a malformed statm entry would have thrown an uncaught
// ConvException. The conversion is now guarded the same way, returning 0.
//
// Params:
//   pid = the process ID to query
// Returns: RSS in KB, or 0 when the statm file is unreadable or malformed
ulong getRSS(pid_t pid) {
	// Construct the path to the statm file for the given PID
	string path = format("/proc/%s/statm", to!string(pid));
	// Read the content of the file
	string content;
	try {
		content = readText(path);
	} catch (Exception e) {
		writeln("Failed to read statm file: ", e.msg);
		return 0;
	}
	// Split the content and get the RSS (second value)
	auto stats = split(content);
	if (stats.length < 2) {
		writeln("Unexpected format in statm file.");
		return 0;
	}
	// RSS is reported in pages; guard the numeric conversion
	ulong rssPages;
	try {
		rssPages = to!ulong(stats[1]);
	} catch (ConvException e) {
		writeln("Unexpected RSS value in statm file.");
		return 0;
	}
	// Convert pages to kilobytes using the system page size
	return rssPages * sysconf(_SC_PAGESIZE) / 1024;
}

View file

@ -78,8 +78,10 @@ class OneDriveWebhook {
} catch (OneDriveException e) {
logSubscriptionError(e);
}
oneDriveApiInstance.shutdown();
object.destroy(oneDriveApiInstance);
// Release API instance back to the pool
oneDriveApiInstance.releaseCurlEngine();
object.destroy(oneDriveApiInstance);
oneDriveApiInstance = null;
}
private static void handle(shared OneDriveWebhook _this, Cgi cgi) {