Merge remote-tracking branch 'upstream/dev' into call-detailed-info

pull/1061/head
Morgan Pretty 3 months ago
commit d0a79f2ba6

@ -7687,7 +7687,7 @@
CLANG_WARN__ARC_BRIDGE_CAST_NONARC = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
CODE_SIGN_IDENTITY = "iPhone Developer";
CURRENT_PROJECT_VERSION = 526;
CURRENT_PROJECT_VERSION = 530;
ENABLE_BITCODE = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
@ -7724,7 +7724,7 @@
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = "";
IPHONEOS_DEPLOYMENT_TARGET = 13.0;
MARKETING_VERSION = 2.8.6;
MARKETING_VERSION = 2.8.8;
ONLY_ACTIVE_ARCH = YES;
OTHER_CFLAGS = (
"-fobjc-arc-exceptions",
@ -7766,7 +7766,7 @@
CLANG_WARN__ARC_BRIDGE_CAST_NONARC = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
CODE_SIGN_IDENTITY = "iPhone Distribution";
CURRENT_PROJECT_VERSION = 526;
CURRENT_PROJECT_VERSION = 530;
ENABLE_BITCODE = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_NO_COMMON_BLOCKS = YES;
@ -7798,7 +7798,7 @@
GCC_WARN_UNUSED_VARIABLE = YES;
HEADER_SEARCH_PATHS = "";
IPHONEOS_DEPLOYMENT_TARGET = 13.0;
MARKETING_VERSION = 2.8.6;
MARKETING_VERSION = 2.8.8;
ONLY_ACTIVE_ARCH = NO;
OTHER_CFLAGS = (
"-DNS_BLOCK_ASSERTIONS=1",
@ -7829,7 +7829,6 @@
CODE_SIGN_ENTITLEMENTS = Session/Meta/Signal.entitlements;
CODE_SIGN_IDENTITY = "iPhone Developer";
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
CURRENT_PROJECT_VERSION = 529;
DEVELOPMENT_TEAM = SUQ8J2PCT7;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
@ -7867,7 +7866,6 @@
"$(SRCROOT)",
);
LLVM_LTO = NO;
MARKETING_VERSION = 2.8.8;
OTHER_LDFLAGS = "$(inherited)";
OTHER_SWIFT_FLAGS = "$(inherited) \"-D\" \"COCOAPODS\" \"-DDEBUG\"";
PRODUCT_BUNDLE_IDENTIFIER = "com.loki-project.loki-messenger";
@ -7900,7 +7898,6 @@
CODE_SIGN_ENTITLEMENTS = Session/Meta/Signal.entitlements;
CODE_SIGN_IDENTITY = "iPhone Developer";
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
CURRENT_PROJECT_VERSION = 529;
DEVELOPMENT_TEAM = SUQ8J2PCT7;
FRAMEWORK_SEARCH_PATHS = (
"$(inherited)",
@ -7938,7 +7935,6 @@
"$(SRCROOT)",
);
LLVM_LTO = NO;
MARKETING_VERSION = 2.8.8;
OTHER_LDFLAGS = "$(inherited)";
PRODUCT_BUNDLE_IDENTIFIER = "com.loki-project.loki-messenger";
PRODUCT_NAME = Session;

@ -389,7 +389,7 @@ public final class SessionCall: CurrentCallProtocol, WebRTCSessionDelegate {
using: dependencies
)
},
completion: { _, _ in
completion: { _ in
Singleton.callManager.suspendDatabaseIfCallEndedInBackground()
}
)
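
This `completion: { _, _ in ... }` to `completion: { _ in ... }` change (repeated at several call sites below) follows the new `Storage.writeAsync` signature introduced later in this diff, where the completion no longer receives the `Database` handle. A minimal standalone sketch of the new shape; `FakeStorage` here is a stand-in, not the project's `Storage` type:

import Foundation

// Stand-in illustrating the reshaped writeAsync used at these call sites: the completion
// now receives only a Result<T, Error>, never the Database handle.
final class FakeStorage {
    func writeAsync<T>(
        updates: @escaping () throws -> T,
        completion: @escaping (Result<T, Error>) -> Void = { _ in }
    ) {
        DispatchQueue.global(qos: .background).async {
            completion(Result { try updates() })
        }
    }
}

// Call sites therefore change from `completion: { _, _ in ... }` to `completion: { _ in ... }`.
FakeStorage().writeAsync(updates: { 42 }) { result in
    print("write finished:", result)
}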

@ -269,7 +269,7 @@ extension ConversationVC:
updates: { db in
db[.isGiphyEnabled] = true
},
completion: { _, _ in
completion: { _ in
DispatchQueue.main.async {
self?.handleGIFButtonTapped()
}
@ -1252,8 +1252,7 @@ extension ConversationVC:
body: .attributedText(
"urlOpenDescription"
.put(key: "url", value: url.absoluteString)
.localizedFormatted(baseFont: .systemFont(ofSize: Values.smallFontSize)),
canScroll: true
.localizedFormatted(baseFont: .systemFont(ofSize: Values.smallFontSize))
),
confirmTitle: "open".localized(),
confirmStyle: .danger,
@ -1263,9 +1262,11 @@ extension ConversationVC:
UIApplication.shared.open(url, options: [:], completionHandler: nil)
self?.showInputAccessoryView()
},
onCancel: { [weak self] _ in
onCancel: { [weak self] modal in
UIPasteboard.general.string = url.absoluteString
self?.showInputAccessoryView()
modal.dismiss(animated: true) {
self?.showInputAccessoryView()
}
}
)
)

@ -554,12 +554,15 @@ final class ConversationVC: BaseVC, LibSessionRespondingViewController, Conversa
) &&
viewModel.threadData.threadIsNoteToSelf == false &&
viewModel.threadData.threadShouldBeVisible == false &&
!LibSession.conversationInConfig(
threadId: threadId,
threadVariant: viewModel.threadData.threadVariant,
visibleOnly: false,
using: viewModel.dependencies
)
!Storage.shared.read({ [dependencies = viewModel.dependencies, threadVariant = viewModel.threadData.threadVariant] db in
LibSession.conversationInConfig(
db,
threadId: threadId,
threadVariant: threadVariant,
visibleOnly: false,
using: dependencies
)
}).defaulting(to: false)
{
Storage.shared.writeAsync { db in
_ = try SessionThread // Intentionally use `deleteAll` here instead of `deleteOrLeave`
@ -659,7 +662,7 @@ final class ConversationVC: BaseVC, LibSessionRespondingViewController, Conversa
// PagedDatabaseObserver won't have them so we need to force a re-fetch of the current
// data to ensure everything is up to date
if didReturnFromBackground {
DispatchQueue.global(qos: .background).asyncAfter(deadline: .now() + 0.01) {
DispatchQueue.global(qos: .background).async {
self?.viewModel.pagedDataObserver?.reload()
}
}
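
The new guard above wraps `LibSession.conversationInConfig` in a `Storage.shared.read` and falls back via `.defaulting(to: false)`. `defaulting(to:)` is the project's own Optional helper (it appears at several call sites in this diff); a minimal equivalent is shown below only so the pattern reads standalone, assuming it is a plain nil-coalescing wrapper:

// Hypothetical minimal equivalent of the project's `defaulting(to:)` helper.
extension Optional {
    func defaulting(to value: Wrapped) -> Wrapped {
        return self ?? value
    }
}

let inConfig: Bool? = nil                 // e.g. the read failed and returned nil
print(inConfig.defaulting(to: false))     // false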

@ -198,7 +198,7 @@ public class ConversationViewModel: OWSAudioPlayerDelegate, NavigatableStateHold
)
// Run the initial query on a background thread so we don't block the push transition
DispatchQueue.global(qos: .userInitiated).asyncAfter(deadline: .now() + 0.01) { [weak self] in
DispatchQueue.global(qos: .userInitiated).async { [weak self] in
// If we don't have a `initialFocusedInfo` then default to `.pageBefore` (it'll query
// from a `0` offset)
switch (focusedInteractionInfo ?? initialData?.initialUnreadInteractionInfo) {

@ -567,7 +567,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
/// There is a warning which can happen on launch because the Database read can be blocked by another database operation,
/// which could result in this blocking the main thread; as a result we want to check that the identity exists on a background thread
/// and then return to the main thread only when required
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + 0.01) { [weak self] in
DispatchQueue.global(qos: .default).async { [weak self] in
guard Identity.userExists() else { return }
self?.enableBackgroundRefreshIfNecessary()
@ -682,7 +682,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
/// We want to start observing the changes for the 'HomeVC' and want to wait until we actually get data back before we
/// continue as we don't want to show a blank home screen
DispatchQueue.global(qos: .userInitiated).asyncAfter(deadline: .now() + 0.01) {
DispatchQueue.global(qos: .userInitiated).async {
viewController.startObservingChanges() {
populateHomeScreenTimer.invalidate()
@ -722,7 +722,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
/// On application startup the `Storage.read` can be slightly slow while GRDB spins up its database
/// read pools (up to a few seconds), since this read is blocking we want to dispatch it to run async to ensure
/// we don't block user interaction while it's running
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + 0.01) {
DispatchQueue.global(qos: .default).async {
let unreadCount: Int = Storage.shared
.read { db in try Interaction.fetchUnreadCount(db) }
.defaulting(to: 0)
@ -817,10 +817,7 @@ class AppDelegate: UIResponder, UIApplicationDelegate, UNUserNotificationCenterD
/// Start the pollers on a background thread so that any database queries they need to run don't
/// block the main thread
///
/// **Note:** We add a delay of `0.01` to prevent potential database re-entrancy if this is triggered
/// within the completion block of a database transaction, this gives it the time to complete the transaction
DispatchQueue.global(qos: .background).asyncAfter(deadline: .now() + 0.01) { [weak self] in
DispatchQueue.global(qos: .background).async { [weak self] in
self?.poller.start()
guard shouldStartGroupPollers else { return }
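
These `asyncAfter(deadline: .now() + 0.01)` calls become plain `async` dispatches throughout this commit, presumably because the reworked `Storage` later in the diff no longer relies on the small delay to avoid re-entrancy. The startup pattern the comments describe stays the same; a rough sketch, with `fetchUnreadCount` as a stand-in for the real `Storage`/`Interaction` call:

import Foundation

// Stand-in for the blocking database read performed at startup.
func fetchUnreadCount() -> Int { return 3 }

// Run the potentially slow read off the main thread, then hop back for UI work only.
DispatchQueue.global(qos: .default).async {
    let unreadCount: Int = fetchUnreadCount()
    DispatchQueue.main.async {
        print("Update the application badge to \(unreadCount)")
    }
}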

@ -133,6 +133,12 @@ final class MainAppContext: AppContext {
}
func setStatusBarHidden(_ isHidden: Bool, animated isAnimated: Bool) {
guard Thread.isMainThread else {
return DispatchQueue.main.async { [weak self] in
self?.setStatusBarHidden(isHidden, animated: isAnimated)
}
}
UIApplication.shared.setStatusBarHidden(isHidden, with: (isAnimated ? .slide : .none))
}
@ -154,6 +160,12 @@ final class MainAppContext: AppContext {
// stringlint:ignore_contents
func ensureSleepBlocking(_ shouldBeBlocking: Bool, blockingObjects: [Any]) {
guard Thread.isMainThread else {
return DispatchQueue.main.async { [weak self] in
self?.ensureSleepBlocking(shouldBeBlocking, blockingObjects: blockingObjects)
}
}
if UIApplication.shared.isIdleTimerDisabled != shouldBeBlocking {
if shouldBeBlocking {
var logString: String = "Blocking sleep because of: \(String(describing: blockingObjects.first))"
@ -171,6 +183,12 @@ final class MainAppContext: AppContext {
}
func setNetworkActivityIndicatorVisible(_ value: Bool) {
guard Thread.isMainThread else {
return DispatchQueue.main.async { [weak self] in
self?.setNetworkActivityIndicatorVisible(value)
}
}
UIApplication.shared.isNetworkActivityIndicatorVisible = value
}
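
The three guards added above share one pattern: if the call arrives off the main thread, re-dispatch the same call to the main queue and return. A self-contained sketch of that pattern (the `MainThreadForwarder`/`applyOnMain` names are illustrative, not from the codebase):

import Foundation

final class MainThreadForwarder {
    // If we're not on the main thread, hop there, call ourselves again and bail out;
    // `DispatchQueue.async` returns Void, so `return DispatchQueue.main.async { ... }` is valid.
    func applyOnMain(_ value: Bool) {
        guard Thread.isMainThread else {
            return DispatchQueue.main.async { [weak self] in
                self?.applyOnMain(value)
            }
        }
        // ... UIKit work that must run on the main thread ...
        print("Applying \(value) on the main thread")
    }
}

MainThreadForwarder().applyOnMain(true)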

File diff suppressed because one or more lines are too long

@ -498,7 +498,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "اِقبَل"
"value" : "قبول"
}
},
"az" : {
@ -540,7 +540,7 @@
"cs" : {
"stringUnit" : {
"state" : "translated",
"value" : "Přijmou"
"value" : "Přijmout"
}
},
"cy" : {
@ -6767,7 +6767,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "لايمكن ازاله الادمن."
"value" : "لا يمكن إزاله المشرف."
}
},
"az" : {
@ -10635,7 +10635,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل الترقيه كمسئول"
"value" : "فشل الترقيه كمشرف"
}
},
"az" : {
@ -12586,7 +12586,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "تم إرسال الترقيه كمسئول"
"value" : "تم إرسال الترقية كمشرف"
}
},
"az" : {
@ -14040,7 +14040,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "لا يوجد مسؤولين في هذا المجتمع."
"value" : "لا يوجد مشرفين في هذا المجتمع."
}
},
"az" : {
@ -17452,7 +17452,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "اعدادات المسؤول"
"value" : "إعدادات المشرف"
}
},
"az" : {
@ -30942,7 +30942,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد مسح كافة المرفقات؟ سيتم أيضًا حذف الرسائل ذات المرفقات."
"value" : "هل أنت متأكد من حذف كافة المرفقات؟ سيتم أيضًا حذف الرسائل ذات المرفقات."
}
},
"az" : {
@ -36247,7 +36247,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "غير قادر على فتح الملف."
"value" : "تعذر فتح الملف."
}
},
"az" : {
@ -48246,7 +48246,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "انضغط لتنزيل {file_type}"
"value" : "انقر لتنزيل {file_type}"
}
},
"az" : {
@ -60287,7 +60287,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من حظر <b>{name}؟</b> المستخدمين المحظورين لايمكنهم إرسال طلبات الرسائل، دعوات المجموعات أو الإتصال بك."
"value" : "هل أنت متأكد من حظر <b>{name}؟</b> المستخدمين المحظورين لا يمكنهم إرسال طلبات الرسائل، دعوات المجموعات أو الاتصال بك."
}
},
"az" : {
@ -72087,7 +72087,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "{app_name} Call"
"value" : "مكالمة {app_name}"
}
},
"az" : {
@ -74075,7 +74075,7 @@
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Your IP is visible to your call partner and an Oxen Foundation server while using beta calls."
"value" : "Your IP is visible to your call partner and a Session Technology Foundation server while using beta calls."
}
},
"eo" : {
@ -77841,7 +77841,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "{app_name} يحتاج إذن الوصول إلى الكاميرا لالتقاط الصور ومقاطع الفيديو، أو لمسح رموز الاستجابة السريعة."
"value" : "{app_name} يحتاج إذن الوصول إلى الكاميرا لالتقاط الصور ومقاطع الفيديو، أو لمسح رموز QR."
}
},
"az" : {
@ -78326,7 +78326,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "{app_name} يحتاج إذن الوصول إلى الكاميرا لمسح رموز الاستجابة السريعة"
"value" : "{app_name} يحتاج إذن الوصول إلى الكاميرا لمسح رموز QR"
}
},
"az" : {
@ -82203,6 +82203,48 @@
}
}
},
"ar" : {
"variations" : {
"plural" : {
"few" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة %lld من عقد الخدمة Service Nodes. معرفات عقد الخدمة Service Node IDs: {service_node_id}."
}
},
"many" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة %lld من عقد الخدمة Service Nodes. معرفات عقد الخدمة Service Node IDs: {service_node_id}."
}
},
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة عقدة الخدمة (Service Node) %lld . معرف عقدة الخدمة (Service Node ID): {service_node_id}."
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة %lld من عقد الخدمة Service Nodes. معرفات عقد الخدمة Service Node IDs: {service_node_id}."
}
},
"two" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة %lld من عقد الخدمة Service Nodes. معرفات عقد الخدمة Service Node IDs: {service_node_id}."
}
},
"zero" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم حذف البيانات بواسطة %lld من عقد الخدمة Service Nodes. معرفات عقد الخدمة Service Node IDs: {service_node_id}."
}
}
}
}
},
"az" : {
"variations" : {
"plural" : {
@ -85657,7 +85699,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل متأكد من رغبتك لمسح جهازك؟"
"value" : "هل متأكد من رغبتك بمسح جهازك؟"
}
},
"az" : {
@ -87112,7 +87154,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من مسح جميع الرسائل من محادثتك مع <b>{name}</b> من جهازك؟"
"value" : "هل أنت متأكد من مسح جميع الرسائل من محادثتك مع <b>{name}</b> من جهازك؟"
}
},
"az" : {
@ -87597,7 +87639,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد حذف كافة الرسائل <b>{community_name}؟</b> من جهازك."
"value" : "هل أنت متأكد من حذف كافة الرسائل <b>{community_name}؟</b> من جهازك."
}
},
"az" : {
@ -89052,7 +89094,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد حذف كافة الرسائل <b>{group_name}؟</b>"
"value" : "هل أنت متأكد من حذف كافة الرسائل <b>{group_name}؟</b>"
}
},
"az" : {
@ -89537,7 +89579,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد حذف كافة الرسائل <b>{group_name}؟</b> من جهازك."
"value" : "هل أنت متأكد من حذف كافة الرسائل <b>{group_name}؟</b> من جهازك."
}
},
"az" : {
@ -90022,7 +90064,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد حذف كافة رسائل ملاحظة لنفسي من جهازك؟"
"value" : "هل أنت متأكد من مسح كافة رسائل \"ملاحظة لنفسي\" من جهازك؟"
}
},
"az" : {
@ -94806,7 +94848,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "عذراً، حدث خطأ. حاول مرة أخرى لاحقاً."
"value" : "عفواً، حدث خطأ. الرجاء المحاولة مرة أخرى لاحقاً."
}
},
"az" : {
@ -102506,7 +102548,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد مسح <b>{name}؟</b> من قائمة جهات إتصالك؟ ستصل أي رسائل جديدة من <b>{name}</b> كطلب رسالة."
"value" : "هل أنت متأكد من حذف <b>{name}؟</b> من قائمة جهات إتصالك؟ ستصل أي رسائل جديدة من <b>{name}</b> كطلب رسالة."
}
},
"az" : {
@ -103470,7 +103512,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "اختر اتصالات"
"value" : "تحديد جهات الاتصال"
}
},
"az" : {
@ -116499,7 +116541,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "مراسلة جديدة"
"value" : "محادثة جديدة"
}
},
"az" : {
@ -128055,7 +128097,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "عند التمكين، سيتم إشعارك فقط بالرسائل التي تذكرك."
"value" : "عند التمكين، سيتم إشعارك بالرسائل التي تشير إليك فقط."
}
},
"az" : {
@ -135105,6 +135147,58 @@
"deleteMessageConfirm" : {
"extractionState" : "manual",
"localizations" : {
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"few" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسائل؟"
}
},
"many" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسائل؟"
}
},
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسالة؟"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسائل؟"
}
},
"two" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسائل؟"
}
},
"zero" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من حذف الرسائل؟"
}
}
}
}
}
}
},
"cs" : {
"stringUnit" : {
"state" : "translated",
@ -140766,6 +140860,58 @@
}
}
},
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"few" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسائل"
}
},
"many" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسائل"
}
},
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسالة"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسائل"
}
},
"two" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسائل"
}
},
"zero" : {
"stringUnit" : {
"state" : "translated",
"value" : "فشل حذف الرسائل"
}
}
}
}
}
}
},
"az" : {
"stringUnit" : {
"state" : "translated",
@ -143175,7 +143321,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد مسح هذه الرسائل لدى الجميع؟"
"value" : "هل أنت متأكد من حذف هذه الرسائل لدى الجميع؟"
}
},
"az" : {
@ -149088,7 +149234,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "ينطبق هذا الإعداد على الجميع في هذه المحادثة.<br/>يمكن لمسؤولي المجموعة فقط تغيير هذا الإعداد."
"value" : "ينطبق هذا الإعداد على الجميع في هذه المحادثة.<br/>يمكن لمشرفي المجموعة فقط تغيير هذا الإعداد."
}
},
"az" : {
@ -169283,7 +169429,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "حيوانات & و طبيعة"
"value" : "حيوانات و طبيعة"
}
},
"az" : {
@ -173121,7 +173267,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد مسح كافة {emoji}؟"
"value" : "هل أنت متأكد من مسح كافة {emoji}؟"
}
},
"az" : {
@ -187595,7 +187741,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متيقِّن من أنك تريد مسح <b>{group_name}؟</b> سيؤدي ذلك إلى إزالة جميع الأعضاء وحذف كافة محتويات المجموعة."
"value" : "هل أنت متأكد من حذف <b>{group_name}؟</b> سيؤدي ذلك إلى إزالة جميع الأعضاء وحذف كافة محتويات المجموعة."
}
},
"az" : {
@ -188082,6 +188228,12 @@
"groupDeletedMemberDescription" : {
"extractionState" : "manual",
"localizations" : {
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "{group_name} تم حذفه بواسطة مشرف المجموعة. لن تتمكن من إرسال أي رسائل أخرى."
}
},
"cs" : {
"stringUnit" : {
"state" : "translated",
@ -191497,7 +191649,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من أنك تريد حذف دعوة المجموعة هذه؟"
"value" : "هل أنت متأكد من حذف دعوة المجموعة؟"
}
},
"az" : {
@ -209021,6 +209173,17 @@
}
}
},
"groupNotUpdatedWarning" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Group has not been updated in over 30 days. You may experience issues sending messages or viewing Group information."
}
}
}
},
"groupOnlyAdmin" : {
"extractionState" : "manual",
"localizations" : {
@ -209030,6 +209193,12 @@
"value" : "Jy is die enigste administrateur in <b>{group_name}</b>.<br/><br/>Groepslede en instellings kan nie verander word sonder 'n administrateur nie."
}
},
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "أنت المشرف الوحيد في\n<b>{group_name}</b>.<br/><br/>لا يمكن تغيير أعضاء المجموعة والإعدادات بدون المشرف."
}
},
"az" : {
"stringUnit" : {
"state" : "translated",
@ -209497,11 +209666,23 @@
"groupPendingRemoval" : {
"extractionState" : "manual",
"localizations" : {
"cs" : {
"stringUnit" : {
"state" : "translated",
"value" : "Čeká na odebrání"
}
},
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Pending removal"
}
},
"nl" : {
"stringUnit" : {
"state" : "translated",
"value" : "In afwachting van verwijdering"
}
}
}
},
@ -229919,6 +230100,72 @@
}
}
},
"inviteFailed" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "Invite Failed"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "Invites Failed"
}
}
}
}
}
}
}
}
},
"inviteFailedDescription" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "The invite could not be sent. Would you like to try again?"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "The invites could not be sent. Would you like to try again?"
}
}
}
}
}
}
}
}
},
"join" : {
"extractionState" : "manual",
"localizations" : {
@ -232314,6 +232561,61 @@
}
}
},
"legacyGroupAfterDeprecationAdmin" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "This group is now read-only. Recreate this group to keep chatting."
}
}
}
},
"legacyGroupAfterDeprecationMember" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "This group is now read-only. Ask the group admin to recreate this group to keep chatting."
}
}
}
},
"legacyGroupBeforeDeprecationAdmin" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Groups have been upgraded! Recreate this group for improved reliability. This group will become read-only on {date}."
}
}
}
},
"legacyGroupBeforeDeprecationMember" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Groups have been upgraded! Ask the group admin to recreate this group for improved reliability. This group will become read-only on {date}."
}
}
}
},
"legacyGroupChatHistory" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Chat history will not be transferred to the new group. You can still view all chat history in your old group."
}
}
}
},
"legacyGroupMemberNew" : {
"extractionState" : "manual",
"localizations" : {
@ -236613,7 +236915,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "غير قادر على تحميل معاينة الرابط"
"value" : "تعذر تحميل معاينة الرابط"
}
},
"az" : {
@ -268443,7 +268745,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من أنك تريد مسح كافة طلبات الرسائل ودعوات المجموعات؟"
"value" : "هل أنت متأكد من مسح كافة طلبات الرسائل ودعوات المجموعات؟"
}
},
"az" : {
@ -280417,7 +280719,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "تعيين الاسم"
"value" : "تعيين الاسم المستعار"
}
},
"az" : {
@ -283770,7 +284072,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "هل أنت متأكد من أنك تريد إخفاء الملاحظة لنفسي؟"
"value" : "هل أنت متأكد من إخفاء \"الملاحظة لنفسي\"؟"
}
},
"az" : {
@ -305307,7 +305609,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "{app_name} مصمم لحماية خصوصيتك."
"value" : "{app_name} مُصمم لحماية خصوصيتك."
}
},
"az" : {
@ -312516,7 +312818,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "افتح"
"value" : "فتح"
}
},
"az" : {
@ -330724,7 +331026,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "منح إحدى الأذونات مطلوب"
"value" : "الإذن مطلوب"
}
},
"az" : {
@ -333592,7 +333894,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "ًًًُُثَبت"
"value" : "تثبيت"
}
},
"az" : {
@ -334550,7 +334852,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "الغ التثبيت"
"value" : "إلغاء التثبيت"
}
},
"az" : {
@ -339334,6 +339636,72 @@
}
}
},
"promotionFailed" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "Promotion Failed"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "Promotions Failed"
}
}
}
}
}
}
}
}
},
"promotionFailedDescription" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "%#@arg1@"
},
"substitutions" : {
"arg1" : {
"argNum" : 1,
"formatSpecifier" : "lld",
"variations" : {
"plural" : {
"one" : {
"stringUnit" : {
"state" : "translated",
"value" : "The promotion could not be applied. Would you like to try again?"
}
},
"other" : {
"stringUnit" : {
"state" : "translated",
"value" : "The promotions could not be applied. Would you like to try again?"
}
}
}
}
}
}
}
}
},
"qrCode" : {
"extractionState" : "manual",
"localizations" : {
@ -341262,7 +341630,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "عرض QR"
"value" : "عرض رمز QR"
}
},
"az" : {
@ -347962,7 +348330,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "الرجاء التحقق من كلمة المرور الخاصة بالاسترداد وحاول مرة أخرى."
"value" : "يرجى التحقق من كلمة مرور الاسترداد الخاصة بك وحاول مرة أخرى."
}
},
"az" : {
@ -352279,7 +352647,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "إخفاء كلمة المرور الخاصة بالاسترداد على هذا الجهاز بشكل دائم."
"value" : "قم بإخفاء كلمة مرور الاسترداد بشكل دائم على هذا الجهاز."
}
},
"az" : {
@ -354183,6 +354551,17 @@
}
}
},
"recreateGroup" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Recreate Group"
}
}
}
},
"redo" : {
"extractionState" : "manual",
"localizations" : {
@ -366286,7 +366665,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "لم يتم العثور على أي نتيجة."
"value" : "لا توجد نتائج."
}
},
"az" : {
@ -368202,7 +368581,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "حدد"
"value" : "تحديد"
}
},
"az" : {
@ -368681,7 +369060,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "اختر الكل"
"value" : "تحديد الكل"
}
},
"az" : {
@ -370597,7 +370976,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "مظهر"
"value" : "المظهر"
}
},
"az" : {
@ -376842,7 +377221,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "شارك"
"value" : "مشاركة"
}
},
"az" : {
@ -378740,6 +379119,17 @@
}
}
},
"shareExtensionNoAccountError" : {
"extractionState" : "manual",
"localizations" : {
"en" : {
"stringUnit" : {
"state" : "translated",
"value" : "Oops! Looks like you don't have a {app_name} account yet.<br/><br/>You'll need to create one in the {app_name} app before you can share."
}
}
}
},
"shareToSession" : {
"extractionState" : "manual",
"localizations" : {
@ -379225,7 +379615,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "أظهر"
"value" : "إظهار"
}
},
"az" : {
@ -383548,7 +383938,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "حاول مرة أخرى"
"value" : "حاول مجدداً"
}
},
"az" : {
@ -391721,7 +392111,7 @@
"ar" : {
"stringUnit" : {
"state" : "translated",
"value" : "افتح الرابط"
"value" : "فتح الرابط"
}
},
"az" : {

@ -292,11 +292,9 @@ public class PushRegistrationManager: NSObject, PKPushRegistryDelegate, PushRegi
dependencies.storage.resumeDatabaseAccess()
LibSession.resumeNetworkAccess()
let maybeCall: SessionCall? = Storage.shared.read { [dependencies = self.dependencies] db in
var call: SessionCall? = nil
let maybeCall: SessionCall? = Storage.shared.read { [dependencies = self.dependencies] db -> SessionCall? in
do {
call = SessionCall(
let call: SessionCall = SessionCall(
db,
for: caller,
uuid: uuid,
@ -309,12 +307,14 @@ public class PushRegistrationManager: NSObject, PKPushRegistryDelegate, PushRegi
.filter(Interaction.Columns.messageUuid == uuid)
.fetchOne(db)
call?.callInteractionId = interaction?.id
} catch {
call.callInteractionId = interaction?.id
return call
}
catch {
SNLog("[Calls] Failed to create call due to error: \(error)")
}
return call
return nil
}
guard let call: SessionCall = maybeCall else {
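
The reshaped read above gives the closure an explicit `-> SessionCall?` return type so the call can be built, linked to its interaction, and returned directly, with `nil` returned from the `catch`. A standalone sketch of the same closure shape (the `Call`/`makeCall` names are stand-ins):

import Foundation

struct Call { var interactionId: Int64? = nil; let uuid: String }
enum CallError: Error { case setupFailed }
func makeCall(uuid: String) throws -> Call { return Call(uuid: uuid) }

// The closure declares its return type, builds the value and returns nil on failure,
// instead of mutating an optional captured from the enclosing scope.
let maybeCall: Call? = { () -> Call? in
    do {
        var call: Call = try makeCall(uuid: "1234")
        call.interactionId = 42
        return call
    } catch {
        print("[Calls] Failed to create call due to error: \(error)")
        return nil
    }
}()

print(maybeCall as Any)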

@ -159,10 +159,11 @@ struct LandingScreen: View {
UIApplication.shared.open(url)
}
},
onCancel: { _ in
onCancel: { modal in
if let url: URL = URL(string: "https://getsession.org/privacy-policy") {
UIApplication.shared.open(url)
}
modal.close()
}
)
)

@ -313,7 +313,6 @@ class DeveloperSettingsViewModel: SessionTableViewModel, NavigatableStateHolder,
sourcePath: FileManager.default.appSharedDataDirectoryPath,
destinationPath: backupFile,
filenamesToExclude: [
".DS_Store",
"\(Storage.dbFileName)-wal",
"\(Storage.dbFileName)-shm"
],

@ -343,12 +343,10 @@ public enum GarbageCollectionJob: JobExecutor {
.deleteAll(db)
}
},
completion: { _, _ in
completion: { _ in
// Dispatch async so we can swap from the write queue to a read one (we are done
// writing), this has to be done after a slight delay to ensure the transaction
// provided by the completion block completes first (ie. so we don't hit
// re-entrancy issues)
queue.asyncAfter(deadline: .now() + 0.01) {
// writing)
queue.async {
// Retrieve a list of all valid attachment and avatar file paths
struct FileInfo {
let attachmentLocalRelativePaths: Set<String>

@ -50,17 +50,16 @@ public enum UpdateProfilePictureJob: JobExecutor {
queue: queue,
displayPictureUpdate: (profilePictureData.map { .currentUserUploadImageData($0) } ?? .none),
success: { db in
// Need to call the 'success' closure asynchronously on the queue after a slight
// delay to prevent a reentrancy issue as it will write to the database and this
// closure is already called within another database write
queue.asyncAfter(deadline: .now() + 0.01) {
queue.async {
SNLog("[UpdateProfilePictureJob] Profile successfully updated")
success(job, false, dependencies)
}
},
failure: { error in
SNLog("[UpdateProfilePictureJob] Failed to update profile")
failure(job, error, false, dependencies)
queue.async {
SNLog("[UpdateProfilePictureJob] Failed to update profile")
failure(job, error, false, dependencies)
}
}
)
}

@ -443,8 +443,8 @@ public extension LibSession {
return contact.priority
case .community:
let maybeUrlInfo: OpenGroupUrlInfo? = Storage.shared
.read { db in try OpenGroupUrlInfo.fetchAll(db, ids: [threadId]) }?
let maybeUrlInfo: OpenGroupUrlInfo? = (try? OpenGroupUrlInfo
.fetchAll(db, ids: [threadId]))?
.first
guard
@ -454,7 +454,7 @@ public extension LibSession {
else { return LibSession.defaultNewThreadPriority }
var community: ugroups_community_info = ugroups_community_info()
let result: Bool = user_groups_get_community(conf, &community, &cBaseUrl, &cRoom)
_ = user_groups_get_community(conf, &community, &cBaseUrl, &cRoom)
LibSessionError.clear(conf)
return community.priority
@ -559,7 +559,7 @@ public extension LibSession {
}
static func conversationInConfig(
_ db: Database? = nil,
_ db: Database,
threadId: String,
threadVariant: SessionThread.Variant,
visibleOnly: Bool,
@ -585,7 +585,7 @@ public extension LibSession {
return dependencies.caches[.libSession]
.config(for: configVariant, publicKey: userPublicKey)
.wrappedValue
.map { conf in
.map { conf -> Bool in
guard var cThreadId: [CChar] = threadId.cString(using: .utf8) else { return false }
switch threadVariant {
@ -611,8 +611,8 @@ public extension LibSession {
return (!visibleOnly || LibSession.shouldBeVisible(priority: contact.priority))
case .community:
let maybeUrlInfo: OpenGroupUrlInfo? = Storage.shared
.read { db in try OpenGroupUrlInfo.fetchAll(db, ids: [threadId]) }?
let maybeUrlInfo: OpenGroupUrlInfo? = (try? OpenGroupUrlInfo
.fetchAll(db, ids: [threadId]))?
.first
guard

@ -1054,30 +1054,28 @@ public final class MessageSender {
guard !rowIds.isEmpty else { return error }
// Note: We need to dispatch this after a small 0.01 delay to prevent any potential
// re-entrancy issues since the 'asyncMigrate' returns a result containing a DB instance
// within a transaction
DispatchQueue.global(qos: .background).asyncAfter(deadline: .now() + 0.01, using: dependencies) {
dependencies.storage.write { db in
switch destination {
case .syncMessage:
try Interaction
.filter(rowIds.contains(Column.rowID))
.updateAll(
db,
Interaction.Columns.state.set(to: Interaction.State.failedToSync),
Interaction.Columns.mostRecentFailureText.set(to: "\(error)")
)
default:
try Interaction
.filter(rowIds.contains(Column.rowID))
.updateAll(
db,
Interaction.Columns.state.set(to: Interaction.State.failed),
Interaction.Columns.mostRecentFailureText.set(to: "\(error)")
)
}
/// If we have affected rows then we should update them with the latest error text
///
/// **Note:** We `writeAsync` here as performing a synchronous `write` results in a reentrancy assertion
dependencies.storage.writeAsync { db in
switch destination {
case .syncMessage:
try Interaction
.filter(rowIds.contains(Column.rowID))
.updateAll(
db,
Interaction.Columns.state.set(to: Interaction.State.failedToSync),
Interaction.Columns.mostRecentFailureText.set(to: "\(error)")
)
default:
try Interaction
.filter(rowIds.contains(Column.rowID))
.updateAll(
db,
Interaction.Columns.state.set(to: Interaction.State.failed),
Interaction.Columns.mostRecentFailureText.set(to: "\(error)")
)
}
}

@ -91,7 +91,7 @@ public struct ProfileManager {
.filter(id: profile.id)
.updateAll(db, Profile.Columns.profilePictureFileName.set(to: nil))
},
completion: { _, _ in
completion: { _ in
// Try to re-download the avatar if it has a URL
if let profilePictureUrl: String = profile.profilePictureUrl, !profilePictureUrl.isEmpty {
// FIXME: Refactor avatar downloading to be a proper Job so we can avoid this

@ -574,10 +574,9 @@ private extension LibSession {
return Log.error("[LibSession] CallbackWrapper called with null context.")
}
/// Dispatch async so we don't block libSession's internals with Swift logic (which can block other requests), we
/// add the `0.01` delay to ensure the closure isn't executed immediately
/// Dispatch async so we don't block libSession's internals with Swift logic (which can block other requests)
let wrapper: CallbackWrapper<Output> = Unmanaged<CallbackWrapper<Output>>.fromOpaque(ctx).takeRetainedValue()
DispatchQueue.global(qos: .default).asyncAfter(deadline: .now() + 0.01) { [wrapper] in
DispatchQueue.global(qos: .default).async { [wrapper] in
wrapper.resultPublisher.send(output)
}
}
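
For context on the `CallbackWrapper` change above: the C callback hands back an opaque context pointer, the retained Swift wrapper is recovered with `Unmanaged`, and the result is forwarded on a background queue so libSession's thread is never blocked by Swift-side work. A standalone sketch of that bridging pattern (`CallbackBox` is illustrative, not the project's type):

import Foundation

final class CallbackBox<Output> {
    let send: (Output) -> Void
    init(_ send: @escaping (Output) -> Void) { self.send = send }
}

// Shape of a C-style completion: recover the retained box from the opaque context and
// forward the value asynchronously so the caller's thread isn't blocked.
func handleCallback<Output>(_ output: Output, _ ctx: UnsafeMutableRawPointer?) {
    guard let ctx = ctx else { return }
    let box = Unmanaged<CallbackBox<Output>>.fromOpaque(ctx).takeRetainedValue()
    DispatchQueue.global(qos: .default).async {
        box.send(output)
    }
}

// Registration side: retain the box, balanced by `takeRetainedValue` above.
let box = CallbackBox<Int> { print("callback output:", $0) }
handleCallback(7, Unmanaged.passRetained(box).toOpaque())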

@ -239,21 +239,21 @@ public class ConfirmationModal: Modal, UITextFieldDelegate {
case .none:
mainStackView.spacing = Values.smallSpacing
case .text(let text, let canScroll):
case .text(let text, let scrollMode):
mainStackView.spacing = Values.smallSpacing
explanationLabel.text = text
explanationLabel.canScroll = canScroll
explanationLabel.scrollMode = scrollMode
explanationLabel.isHidden = false
case .attributedText(let attributedText, let canScroll):
case .attributedText(let attributedText, let scrollMode):
mainStackView.spacing = Values.smallSpacing
explanationLabel.attributedText = attributedText
explanationLabel.canScroll = canScroll
explanationLabel.scrollMode = scrollMode
explanationLabel.isHidden = false
case .input(let explanation, let placeholder, let value, let clearButton, let onTextChanged):
explanationLabel.attributedText = explanation
explanationLabel.canScroll = false
explanationLabel.scrollMode = .never
explanationLabel.isHidden = (explanation == nil)
textField.placeholder = placeholder
textField.text = (value ?? "")
@ -559,11 +559,11 @@ public extension ConfirmationModal.Info {
case none
case text(
_ text: String,
canScroll: Bool = false
scrollMode: ScrollableLabel.ScrollMode = .automatic
)
case attributedText(
_ attributedText: NSAttributedString,
canScroll: Bool = false
scrollMode: ScrollableLabel.ScrollMode = .automatic
)
case input(
explanation: NSAttributedString?,

@ -2,13 +2,18 @@
import UIKit
class ScrollableLabel: UIView {
public class ScrollableLabel: UIView {
public enum ScrollMode: Equatable, Hashable {
case never
case automatic
}
private var oldSize: CGSize = .zero
private var layoutLoopCounter: Int = 0
var canScroll: Bool = false {
var scrollMode: ScrollMode = .automatic {
didSet {
guard canScroll != oldValue else { return }
guard scrollMode != oldValue else { return }
updateContentSizeIfNeeded()
}
@ -104,7 +109,7 @@ class ScrollableLabel: UIView {
label.set(.width, to: .width, of: scrollView)
}
override func layoutSubviews() {
public override func layoutSubviews() {
super.layoutSubviews()
guard frame.size != oldSize else {
@ -128,7 +133,7 @@ class ScrollableLabel: UIView {
// then we need to fix the height of the scroll view to our desired maximum, other
let maxCalculatedHeight: CGFloat = (label.font.lineHeight * CGFloat(maxNumberOfLinesWhenScrolling))
switch (canScroll, maxCalculatedHeight <= scrollView.contentSize.height) {
switch (scrollMode != .never, maxCalculatedHeight <= scrollView.contentSize.height) {
case (false, _), (true, false):
scrollViewHeightAnchor.isActive = false
labelHeightAnchor.isActive = true
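
The `canScroll` Bool becomes a `ScrollMode` enum, and the layout decision above only enables scrolling when the mode allows it and the text exceeds the capped height. A small sketch of that decision in isolation (`shouldScroll`, `maxCalculatedHeight` and `contentHeight` are stand-in names):

import Foundation

enum ScrollMode: Equatable, Hashable {
    case never
    case automatic
}

// Mirrors the switch in layoutSubviews: scroll only when the mode permits it and the
// content is taller than the maximum label height we are willing to show un-scrolled.
func shouldScroll(mode: ScrollMode, maxCalculatedHeight: CGFloat, contentHeight: CGFloat) -> Bool {
    switch (mode != .never, maxCalculatedHeight <= contentHeight) {
    case (false, _), (true, false): return false   // pin the label height, no scrolling
    case (true, true): return true                 // cap the scroll view height and scroll
    }
}

print(shouldScroll(mode: .automatic, maxCalculatedHeight: 100, contentHeight: 250))  // true
print(shouldScroll(mode: .never, maxCalculatedHeight: 100, contentHeight: 250))      // false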

@ -29,6 +29,9 @@ open class Storage {
/// When attempting to do a write the transaction will wait this long to acquire a lock before failing
private static let writeTransactionStartTimeout: TimeInterval = 5
/// If a transaction takes longer than this duration then we should fail the transaction rather than keep hanging
private static let transactionDeadlockTimeoutSeconds: Int = 5
private static var sharedDatabaseDirectoryPath: String { "\(FileManager.default.appSharedDataDirectoryPath)/database" }
private static var databasePath: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)" }
private static var databasePathShm: String { "\(Storage.sharedDatabaseDirectoryPath)/\(Storage.dbFileName)-shm" }
@ -322,17 +325,16 @@ open class Storage {
guard async else { return migrationCompleted(Result(try migrator.migrate(dbWriter))) }
migrator.asyncMigrate(dbWriter) { result in
let finalResult: Swift.Result<Void, Error> = {
let finalResult: Result<Void, Error> = {
switch result {
case .failure(let error): return .failure(error)
case .success: return .success(())
}
}()
// Note: We need to dispatch this after a small 0.01 delay to prevent any potential
// re-entrancy issues since the 'asyncMigrate' returns a result containing a DB instance
// within a transaction
DispatchQueue.global(qos: .userInitiated).asyncAfter(deadline: .now() + 0.01, using: dependencies) {
// Note: We need to dispatch this to the next run loop to prevent blocking if the callback
// performs subsequent database operations
DispatchQueue.global(qos: .userInitiated).async(using: dependencies) {
migrationCompleted(finalResult)
}
}
@ -535,13 +537,16 @@ open class Storage {
static func logIfNeeded(_ error: Error, isWrite: Bool) {
switch error {
case DatabaseError.SQLITE_ABORT, DatabaseError.SQLITE_INTERRUPT:
case DatabaseError.SQLITE_ABORT, DatabaseError.SQLITE_INTERRUPT, DatabaseError.SQLITE_ERROR:
let message: String = ((error as? DatabaseError)?.message ?? "Unknown")
Log.error("[Storage] Database \(isWrite ? "write" : "read") failed due to error: \(message)")
case StorageError.databaseSuspended:
Log.error("[Storage] Database \(isWrite ? "write" : "read") failed as the database is suspended.")
case StorageError.transactionDeadlockTimeout:
Log.critical("[Storage] Database \(isWrite ? "write" : "read") failed due to a potential synchronous query deadlock timeout.")
default: break
}
}
@ -557,71 +562,158 @@ open class Storage {
}
}
private static func perform<T>(
info: CallInfo,
updates: @escaping (Database) throws -> T
) -> (Database) throws -> T {
return { db in
guard info.storage?.isSuspended == false else { throw StorageError.databaseSuspended }
let timer: TransactionTimer = TransactionTimer.start(duration: Storage.slowTransactionThreshold, info: info)
defer { timer.stop() }
// Get the result
let result: T = try updates(db)
// Update the state flags
switch info.isWrite {
case true: info.storage?.hasSuccessfullyWritten = true
case false: info.storage?.hasSuccessfullyRead = true
// MARK: - Operations
private static func track<T>(
_ db: Database,
_ info: CallInfo,
_ operation: @escaping (Database) throws -> T
) throws -> T {
guard info.storage?.isSuspended == false else { throw StorageError.databaseSuspended }
let timer: TransactionTimer = TransactionTimer.start(
duration: Storage.slowTransactionThreshold,
info: info
)
defer { timer.stop() }
// Get the result
let result: T = try operation(db)
// Update the state flags
switch info.isWrite {
case true: info.storage?.hasSuccessfullyWritten = true
case false: info.storage?.hasSuccessfullyRead = true
}
return result
}
/// This function manually performs `read`/`write` operations in either a synchronous or asynchronous way using a semaphore to
/// block the synchronous version, because `GRDB` has an internal assertion when using its built-in synchronous `read`/`write`
/// functions to prevent reentrancy, which is unsupported
///
/// Unfortunately this results in the code getting messy when trying to chain multiple database transactions (even
/// when using `db.afterNextTransaction`) which is somewhat unintuitive
///
/// The `async` variants don't need to worry about this reentrancy issue, so we route all operations through those instead
/// and just block the thread when we want to perform a synchronous operation
@discardableResult private static func performOperation<T>(
_ info: CallInfo,
_ operation: @escaping (Database) throws -> T,
_ completion: ((Result<T, Error>) -> Void)? = nil
) -> Result<T, Error> {
var result: Result<T, Error> = .failure(StorageError.invalidQueryResult)
let semaphore: DispatchSemaphore? = (info.isAsync ? nil : DispatchSemaphore(value: 0))
let logErrorIfNeeded: (Result<T, Error>) -> () = { result in
switch result {
case .success: break
case .failure(let error): StorageState.logIfNeeded(error, isWrite: info.isWrite)
}
}
/// Perform the actual operation
switch (StorageState(info.storage), info.isWrite) {
case (.invalid(let error), _):
result = .failure(error)
semaphore?.signal()
return result
case (.valid(let dbWriter), true):
dbWriter.asyncWrite(
{ db in result = .success(try Storage.track(db, info, operation)) },
completion: { _, dbResult in
switch dbResult {
case .success: break
case .failure(let error): result = .failure(error)
}
semaphore?.signal()
if info.isAsync { logErrorIfNeeded(result) }
completion?(result)
}
)
case (.valid(let dbWriter), false):
dbWriter.asyncRead { dbResult in
do {
switch dbResult {
case .failure(let error): throw error
case .success(let db): result = .success(try Storage.track(db, info, operation))
}
} catch {
result = .failure(error)
}
semaphore?.signal()
if info.isAsync { logErrorIfNeeded(result) }
completion?(result)
}
}
/// If this is a synchronous operation then `semaphore` will exist and will block here waiting on the signal from one of the
/// above closures to be sent
let semaphoreResult: DispatchTimeoutResult? = semaphore?.wait(timeout: .now() + .seconds(Storage.transactionDeadlockTimeoutSeconds))
/// If the transaction timed out then log the error and report a failure
guard semaphoreResult != .timedOut else {
StorageState.logIfNeeded(StorageError.transactionDeadlockTimeout, isWrite: info.isWrite)
return .failure(StorageError.transactionDeadlockTimeout)
}
if !info.isAsync { logErrorIfNeeded(result) }
return result
}
private func performPublisherOperation<T>(
_ fileName: String,
_ functionName: String,
_ lineNumber: Int,
isWrite: Bool,
_ operation: @escaping (Database) throws -> T
) -> AnyPublisher<T, Error> {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: isWrite)
case .valid:
/// **Note:** GRDB does have `readPublisher`/`writePublisher` functions but they appear to asynchronously
/// trigger both the `output` and `complete` closures at the same time, which causes a lot of unexpected
/// behaviours in our code (this behaviour is apparently expected; for more information see
/// https://github.com/groue/GRDB.swift/issues/1334)
///
/// Instead of those we just use `Deferred { Future {} }`, which is executed on the specified scheduler
/// and behaves in a much more predictable way than the GRDB `readPublisher`/`writePublisher` does
let info: CallInfo = CallInfo(self, fileName, functionName, lineNumber, (isWrite ? .syncWrite : .syncRead))
return Deferred {
Future { resolver in
resolver(Storage.performOperation(info, operation))
}
}.eraseToAnyPublisher()
}
}
// MARK: - Functions
@discardableResult public func write<T>(
fileName: String = #file,
functionName: String = #function,
lineNumber: Int = #line,
fileName file: String = #file,
functionName funcN: String = #function,
lineNumber line: Int = #line,
using dependencies: Dependencies = Dependencies(),
updates: @escaping (Database) throws -> T?
) -> T? {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true)
case .valid(let dbWriter):
let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self)
do { return try dbWriter.write(Storage.perform(info: info, updates: updates)) }
catch { return StorageState.logIfNeeded(error, isWrite: true) }
switch Storage.performOperation(CallInfo(self, file, funcN, line, .syncWrite), updates) {
case .failure: return nil
case .success(let result): return result
}
}
open func writeAsync<T>(
fileName: String = #file,
functionName: String = #function,
lineNumber: Int = #line,
fileName file: String = #file,
functionName funcN: String = #function,
lineNumber line: Int = #line,
using dependencies: Dependencies = Dependencies(),
updates: @escaping (Database) throws -> T,
completion: @escaping (Database, Swift.Result<T, Error>) throws -> Void = { _, _ in }
completion: @escaping (Result<T, Error>) -> Void = { _ in }
) {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true)
case .valid(let dbWriter):
let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self)
dbWriter.asyncWrite(
Storage.perform(info: info, updates: updates),
completion: { db, result in
switch result {
case .failure(let error): StorageState.logIfNeeded(error, isWrite: true)
default: break
}
try? completion(db, result)
}
)
}
Storage.performOperation(CallInfo(self, file, funcN, line, .asyncWrite), updates, completion)
}
open func writePublisher<T>(
@ -631,50 +723,19 @@ open class Storage {
using dependencies: Dependencies = Dependencies(),
updates: @escaping (Database) throws -> T
) -> AnyPublisher<T, Error> {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true)
case .valid:
/// **Note:** GRDB does have a `writePublisher` method but it appears to asynchronously trigger
/// both the `output` and `complete` closures at the same time which causes a lot of unexpected
/// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code
/// for more information see https://github.com/groue/GRDB.swift/issues/1334)
///
/// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduled
/// which behaves in a much more expected way than the GRDB `writePublisher` does
let info: CallInfo = CallInfo(fileName, functionName, lineNumber, true, self)
return Deferred {
Future { [weak self] resolver in
/// The `StorageState` may have changed between the creation of the publisher and it actually
/// being executed so we need to check again
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: true)
case .valid(let dbWriter):
do {
resolver(Result.success(try dbWriter.write(Storage.perform(info: info, updates: updates))))
}
catch {
StorageState.logIfNeeded(error, isWrite: true)
resolver(Result.failure(error))
}
}
}
}.eraseToAnyPublisher()
}
return performPublisherOperation(fileName, functionName, lineNumber, isWrite: true, updates)
}
@discardableResult public func read<T>(
fileName: String = #file,
functionName: String = #function,
lineNumber: Int = #line,
fileName file: String = #file,
functionName funcN: String = #function,
lineNumber line: Int = #line,
using dependencies: Dependencies = Dependencies(),
_ value: @escaping (Database) throws -> T?
) -> T? {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false)
case .valid(let dbWriter):
let info: CallInfo = CallInfo(fileName, functionName, lineNumber, false, self)
do { return try dbWriter.read(Storage.perform(info: info, updates: value)) }
catch { return StorageState.logIfNeeded(error, isWrite: false) }
switch Storage.performOperation(CallInfo(self, file, funcN, line, .syncRead), value) {
case .failure: return nil
case .success(let result): return result
}
}
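
The `performPublisherOperation` helper above keeps the existing `Deferred { Future { } }` approach rather than GRDB's `readPublisher`/`writePublisher` (see the comment and linked GRDB issue). A standalone Combine sketch of that wrapper, detached from `Storage`:

import Combine
import Foundation

// Work only runs when subscribed to, and emits exactly one value or one failure.
func deferredWork<T>(_ work: @escaping () throws -> T) -> AnyPublisher<T, Error> {
    return Deferred {
        Future<T, Error> { resolver in
            resolver(Result { try work() })
        }
    }.eraseToAnyPublisher()
}

let cancellable = deferredWork { "hello" }
    .sink(
        receiveCompletion: { completion in print("completed:", completion) },
        receiveValue: { value in print("value:", value) }
    )
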
@ -685,35 +746,7 @@ open class Storage {
using dependencies: Dependencies = Dependencies(),
value: @escaping (Database) throws -> T
) -> AnyPublisher<T, Error> {
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false)
case .valid:
/// **Note:** GRDB does have a `readPublisher` method but it appears to asynchronously trigger
/// both the `output` and `complete` closures at the same time which causes a lot of unexpected
/// behaviours (this behaviour is apparently expected but still causes a number of odd behaviours in our code
/// for more information see https://github.com/groue/GRDB.swift/issues/1334)
///
/// Instead of this we are just using `Deferred { Future {} }` which is executed on the specified scheduled
/// which behaves in a much more expected way than the GRDB `readPublisher` does
let info: CallInfo = CallInfo(fileName, functionName, lineNumber, false, self)
return Deferred {
Future { [weak self] resolver in
/// The `StorageState` may have changed between the creation of the publisher and it actually
/// being executed so we need to check again
switch StorageState(self) {
case .invalid(let error): return StorageState.logIfNeeded(error, isWrite: false)
case .valid(let dbWriter):
do {
resolver(Result.success(try dbWriter.read(Storage.perform(info: info, updates: value))))
}
catch {
StorageState.logIfNeeded(error, isWrite: false)
resolver(Result.failure(error))
}
}
}
}.eraseToAnyPublisher()
}
return performPublisherOperation(fileName, functionName, lineNumber, isWrite: false, value)
}
/// Refer to the `ValueObservation.start` method for full documentation
@ -904,11 +937,18 @@ public extension Storage {
private extension Storage {
class CallInfo {
enum Behaviour {
case syncRead
case asyncRead
case syncWrite
case asyncWrite
}
weak var storage: Storage?
let file: String
let function: String
let line: Int
let isWrite: Bool
weak var storage: Storage?
let behaviour: Behaviour
var callInfo: String {
let fileInfo: String = (file.components(separatedBy: "/").last.map { "\($0):\(line) - " } ?? "")
@ -916,18 +956,31 @@ private extension Storage {
return "\(fileInfo)\(function)"
}
var isWrite: Bool {
switch behaviour {
case .syncWrite, .asyncWrite: return true
case .syncRead, .asyncRead: return false
}
}
var isAsync: Bool {
switch behaviour {
case .asyncRead, .asyncWrite: return true
case .syncRead, .syncWrite: return false
}
}
init(
_ storage: Storage?,
_ file: String,
_ function: String,
_ line: Int,
_ isWrite: Bool,
_ storage: Storage?
_ behaviour: Behaviour
) {
self.storage = storage
self.file = file
self.function = function
self.line = line
self.isWrite = isWrite
self.storage = storage
self.behaviour = behaviour
}
}
}
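
The core of the `Storage` rework above: every operation goes through GRDB's async `read`/`write`, and the synchronous variants block the caller on a semaphore with a `transactionDeadlockTimeoutSeconds` timeout so a re-entrant call surfaces `StorageError.transactionDeadlockTimeout` instead of hanging. A standalone sketch of that sync-over-async shape, with the async engine reduced to a background dispatch:

import Foundation

enum SketchError: Error { case deadlockTimeout }

// Run `operation` asynchronously and block the calling thread until it reports back,
// giving up (rather than deadlocking) if nothing arrives within the timeout.
func performSync<T>(
    timeoutSeconds: Int = 5,
    operation: @escaping (@escaping (Result<T, Error>) -> Void) -> Void
) -> Result<T, Error> {
    var result: Result<T, Error> = .failure(SketchError.deadlockTimeout)
    let semaphore = DispatchSemaphore(value: 0)

    operation { opResult in
        result = opResult
        semaphore.signal()
    }

    guard semaphore.wait(timeout: .now() + .seconds(timeoutSeconds)) != .timedOut else {
        return .failure(SketchError.deadlockTimeout)
    }
    return result
}

// Usage: the "async engine" here is just a background queue standing in for GRDB's asyncRead/asyncWrite.
let value: Result<Int, Error> = performSync { complete in
    DispatchQueue.global(qos: .userInitiated).async { complete(.success(42)) }
}
print(value)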

@ -13,6 +13,8 @@ public enum StorageError: Error {
case keySpecCreationFailed
case keySpecInaccessible
case decodingFailed
case invalidQueryResult
case transactionDeadlockTimeout
case failedToSave
case objectNotFound

@ -180,12 +180,9 @@ public class PagedDatabaseObserver<ObservedTable, T>: TransactionObserver where
return []
}
// This looks odd but if we just use `commitProcessingQueue.async` then the code can
// get executed immediately wihch can result in a new transaction being started whilst
// we are still within the transaction wrapping `databaseDidCommit` (which we don't
// want), by adding this tiny 0.01 delay we should be giving it enough time to finish
// processing the current transaction
commitProcessingQueue.asyncAfter(deadline: .now() + 0.01) { [weak self] in
// Dispatch to the `commitProcessingQueue` so we don't block the database `write` queue
// when updating the data
commitProcessingQueue.async { [weak self] in
self?.processDatabaseCommit(committedChanges: committedChanges)
}
}

@ -1298,10 +1298,7 @@ public final class JobQueue: Hashable {
// thread and do so by creating a number of background queues to run the jobs on, if this
// function was called on the wrong queue then we need to dispatch to the correct one
guard DispatchQueue.with(key: queueKey, matches: queueContext, using: dependencies) else {
// Note: We need to dispatch this after a small 0.01 delay to prevent any potential
// re-entrancy issues since the `start` function can be called within an existing
// database transaction (eg. via `db.afterNextTransactionNestedOnce`)
internalQueue.asyncAfter(deadline: .now() + 0.01, using: dependencies) { [weak self] in
internalQueue.async(using: dependencies) { [weak self] in
self?.start(forceWhenAlreadyRunning: forceWhenAlreadyRunning, using: dependencies)
}
return

@ -77,7 +77,7 @@ public class DirectoryArchiver {
// Stream-based directory traversal and compression
let enumerator: FileManager.DirectoryEnumerator? = FileManager.default.enumerator(
at: sourceUrl,
includingPropertiesForKeys: [.isRegularFileKey, .isDirectoryKey]
includingPropertiesForKeys: [.isRegularFileKey, .isHiddenKey, .isDirectoryKey]
)
let fileUrls: [URL] = (enumerator?.allObjects
.compactMap { $0 as? URL }
@ -85,11 +85,14 @@ public class DirectoryArchiver {
guard !filenamesToExclude.contains(url.lastPathComponent) else { return false }
guard
let resourceValues = try? url.resourceValues(
forKeys: [.isRegularFileKey, .isDirectoryKey]
forKeys: [.isRegularFileKey, .isHiddenKey, .isDirectoryKey]
)
else { return true }
return (resourceValues.isRegularFile == true)
return (
resourceValues.isRegularFile == true &&
resourceValues.isHidden != true
)
})
.defaulting(to: [])
var index: Int = 0
@ -215,6 +218,7 @@ public class DirectoryArchiver {
var filePaths: [String] = []
var additionalFilePaths: [String] = []
var skippedFilePaths: [String] = []
var fileAmountProcessed: UInt64 = 0
progressChanged?(0, Int(expectedFileCount + expectedAdditionalFileCount), 0, encryptedFileSize)
while inputStream.hasBytesAvailable {
@ -224,7 +228,7 @@ public class DirectoryArchiver {
)
fileAmountProcessed += UInt64(blockSizeBytesRead)
progressChanged?(
(filePaths.count + additionalFilePaths.count),
(filePaths.count + skippedFilePaths.count + additionalFilePaths.count),
Int(expectedFileCount + expectedAdditionalFileCount),
fileAmountProcessed,
encryptedFileSize
@ -274,12 +278,30 @@ public class DirectoryArchiver {
)
fileAmountProcessed += encryptedSize
progressChanged?(
(filePaths.count + additionalFilePaths.count),
(filePaths.count + skippedFilePaths.count + additionalFilePaths.count),
Int(expectedFileCount + expectedAdditionalFileCount),
fileAmountProcessed,
encryptedFileSize
)
// If the file is a hidden file (shouldn't be possible anymore but old backups had this
// issue) then just skip the file - any hidden files are from Apple and seem to fail to
// decrypt causing the entire import to fail
guard !URL(fileURLWithPath: relativePath).lastPathComponent.starts(with: ".") else {
Log.warn(.cat, "Skipping hidden file to avoid breaking the import: \(relativePath)")
skippedFilePaths.append(fullPath)
// Update the progress
fileAmountProcessed += fileSize
progressChanged?(
(filePaths.count + skippedFilePaths.count + additionalFilePaths.count),
Int(expectedFileCount + expectedAdditionalFileCount),
fileAmountProcessed,
encryptedFileSize
)
continue
}
// Read and decrypt file content
guard let outputStream: OutputStream = OutputStream(toFileAtPath: fullPath, append: false) else {
Log.error(.cat, "Failed to create output stream")
@ -302,7 +324,7 @@ public class DirectoryArchiver {
// Update the progress
fileAmountProcessed += UInt64(chunkSizeBytesRead) + UInt64(encryptedSize)
progressChanged?(
(filePaths.count + additionalFilePaths.count),
(filePaths.count + skippedFilePaths.count + additionalFilePaths.count),
Int(expectedFileCount + expectedAdditionalFileCount),
fileAmountProcessed,
encryptedFileSize
@ -315,7 +337,7 @@ public class DirectoryArchiver {
case true: additionalFilePaths.append(fullPath)
}
progressChanged?(
(filePaths.count + additionalFilePaths.count),
(filePaths.count + skippedFilePaths.count + additionalFilePaths.count),
Int(expectedFileCount + expectedAdditionalFileCount),
fileAmountProcessed,
encryptedFileSize
@ -345,12 +367,12 @@ public class DirectoryArchiver {
throw ArchiveError.importedFileCountMismatch
}
guard
filePaths.count == expectedFileCount &&
(filePaths.count + skippedFilePaths.count) == expectedFileCount &&
additionalFilePaths.count == expectedAdditionalFileCount
else {
switch ((filePaths.count == expectedFileCount), additionalFilePaths.count == expectedAdditionalFileCount) {
switch (((filePaths.count + skippedFilePaths.count) == expectedFileCount), additionalFilePaths.count == expectedAdditionalFileCount) {
case (false, true):
Log.error(.cat, "The number of main files decrypted (\(filePaths.count)) didn't match the expected number of main files (\(expectedFileCount))")
Log.error(.cat, "The number of main files decrypted (\(filePaths.count)) plus skipped files (\(skippedFilePaths.count)) didn't match the expected number of main files (\(expectedFileCount))")
case (true, false):
Log.error(.cat, "The number of additional files decrypted (\(additionalFilePaths.count)) didn't match the expected number of additional files (\(expectedAdditionalFileCount))")

@ -119,14 +119,14 @@ class SynchronousStorage: Storage {
lineNumber: Int = #line,
using dependencies: Dependencies = Dependencies(),
updates: @escaping (Database) throws -> T,
completion: @escaping (Database, Result<T, Error>) throws -> Void
completion: @escaping (Result<T, Error>) -> Void
) {
do {
let result: T = try write(using: dependencies, updates: updates) ?? { throw StorageError.failedToSave }()
write(using: dependencies) { db in try completion(db, Result.success(result)) }
completion(Result.success(result))
}
catch {
write(using: dependencies) { db in try completion(db, Result.failure(error)) }
completion(Result.failure(error))
}
}
