Merge remote-tracking branch 'upstream/master' into master-windows-changes

LAGonauta 2022-12-28 09:13:59 -03:00
commit fb05c83c8e
19 changed files with 196 additions and 135 deletions


@@ -20,6 +20,7 @@ function(find_packages result)
find_package(${pkg} ${pkg_version} REQUIRED)
list(APPEND _res ${${pkg}_PKG_CONFIG_NAME})
list(APPEND _res_libs ${${pkg}_LIBRARIES})
set(${pkg}_VERSION "${${pkg}_VERSION}" PARENT_SCOPE)
endforeach(pkg)
foreach(pkg ${ARGS_OPTIONAL})
string(REPLACE ">=" ";" pkg_ ${pkg})
@@ -38,6 +39,7 @@ function(find_packages result)
if(${pkg}_FOUND)
list(APPEND _res ${${pkg}_PKG_CONFIG_NAME})
list(APPEND _res_libs ${${pkg}_LIBRARIES})
set(${pkg}_VERSION "${${pkg}_VERSION}" PARENT_SCOPE)
endif()
endforeach(pkg)
set(${result} "${_res}" PARENT_SCOPE)

cmake/SoupVersion.cmake (new file, 33 lines)

@@ -0,0 +1,33 @@
find_package(Nice QUIET)
if (Nice_FOUND AND NOT SOUP_VERSION AND NOT USE_SOUP3)
file(GET_RUNTIME_DEPENDENCIES
RESOLVED_DEPENDENCIES_VAR Nice_DEPENDENCIES
UNRESOLVED_DEPENDENCIES_VAR Nice_UNRESOLVED_DEPENDENCIES
LIBRARIES ${Nice_LIBRARY}
PRE_INCLUDE_REGEXES "soup|gupnp"
PRE_EXCLUDE_REGEXES "."
)
foreach (lib ${Nice_DEPENDENCIES})
if (lib MATCHES ".*/libsoup-3.*")
set(SOUP_VERSION 3)
endif ()
endforeach ()
foreach (lib ${Nice_DEPENDENCIES})
if (lib MATCHES ".*/libsoup-2.*")
set(SOUP_VERSION 2)
endif ()
endforeach ()
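# Note: this second loop runs after the libsoup-3 one, so libsoup-2 wins
# whenever libnice's dependency tree contains both versions.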
set(SOUP_VERSION ${SOUP_VERSION} CACHE STRING "Version of libsoup to use")
set_property(CACHE SOUP_VERSION PROPERTY STRINGS "2" "3")
message(STATUS "Using Soup${SOUP_VERSION} to provide Soup")
elseif (NOT SOUP_VERSION)
find_package(Soup2 QUIET)
find_package(Soup3 QUIET)
# Only use libsoup 3 if specifically requested or when libsoup 2 is not available
if (Soup3_FOUND AND NOT Soup2_FOUND OR USE_SOUP3)
set(SOUP_VERSION 3)
else ()
set(SOUP_VERSION 2)
endif ()
endif ()
set(Soup "Soup${SOUP_VERSION}")
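For reference, the consumption pattern this module enables looks roughly like the following (a minimal sketch modeled on the http-files build file changed further down; MY_PACKAGES and MY_DEFINITIONS are illustrative names). It relies on the find_packages change above, which now propagates ${pkg}_VERSION to the caller:

include(SoupVersion)  # decides SOUP_VERSION and sets Soup to "Soup2" or "Soup3"
find_packages(MY_PACKAGES REQUIRED
    Gee
    GLib
    ${Soup}
)
set(MY_DEFINITIONS)
if(${Soup}_VERSION VERSION_GREATER_EQUAL "3.0")
    set(MY_DEFINITIONS ${MY_DEFINITIONS} SOUP_3_0)  # surfaces in Vala as #if SOUP_3_0
endif()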

configure (vendored, 6 changed lines)

@@ -1,7 +1,7 @@
#!/bin/sh
OPTS=`getopt -o "h" --long \
help,fetch-only,no-debug,disable-fast-vapi,with-tests,release,with-libsignal-in-tree,\
help,fetch-only,no-debug,disable-fast-vapi,with-tests,release,with-libsignal-in-tree,with-libsoup3,\
enable-plugin:,disable-plugin:,\
prefix:,program-prefix:,exec-prefix:,lib-suffix:,\
bindir:,libdir:,includedir:,datadir:,\
@@ -22,6 +22,7 @@ DISABLE_FAST_VAPI=
LIB_SUFFIX=
NO_DEBUG=
FETCH_ONLY=
USE_SOUP3=
EXEC_PREFIX=
BINDIR=
@@ -56,6 +57,7 @@ Configuration:
--release Configure to build an optimized release version
--with-libsignal-in-tree Build libsignal-protocol-c in tree and link it
statically.
--with-libsoup3 Build with libsoup-3.0
--with-tests Also build tests.
Plugin configuration:
@@ -111,6 +113,7 @@ while true; do
--valac-flags ) VALAC_FLAGS="$2"; shift; shift ;;
--lib-suffix ) LIB_SUFFIX="$2"; shift; shift ;;
--with-libsignal-in-tree ) BUILD_LIBSIGNAL_IN_TREE=yes; shift ;;
--with-libsoup3 ) USE_SOUP3=yes; shift ;;
--disable-fast-vapi ) DISABLE_FAST_VAPI=yes; shift ;;
--no-debug ) NO_DEBUG=yes; shift ;;
--fetch-only ) FETCH_ONLY=yes; shift ;;
@@ -256,6 +259,7 @@ cmake -G "$cmake_type" \
-DDISABLED_PLUGINS="$DISABLED_PLUGINS" \
-DBUILD_TESTS="$BUILD_TESTS" \
-DBUILD_LIBSIGNAL_IN_TREE="$BUILD_LIBSIGNAL_IN_TREE" \
-DUSE_SOUP3="$USE_SOUP3" \
-DVALA_EXECUTABLE="$VALAC" \
-DCMAKE_VALA_FLAGS="$VALACFLAGS" \
-DDISABLE_FAST_VAPI="$DISABLE_FAST_VAPI" \
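With the above, a source build opts into libsoup 3 with the usual two steps:

./configure --with-libsoup3
make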


@@ -246,7 +246,15 @@ public class FileManager : StreamInteractionModule, Object {
File file = File.new_for_path(Path.build_filename(get_storage_dir(), filename));
OutputStream os = file.create(FileCreateFlags.REPLACE_DESTINATION);
yield os.splice_async(input_stream, OutputStreamSpliceFlags.CLOSE_SOURCE | OutputStreamSpliceFlags.CLOSE_TARGET, Priority.LOW, file_transfer.cancellable);
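// Copy in 1 KiB chunks: shrink the buffer to the bytes actually read before
// writing, then restore its length before the next read.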
uint8[] buffer = new uint8[1024];
ssize_t read;
while ((read = yield input_stream.read_async(buffer, Priority.LOW, file_transfer.cancellable)) > 0) {
buffer.length = (int) read;
yield os.write_async(buffer, Priority.LOW, file_transfer.cancellable);
buffer.length = 1024;
}
yield input_stream.close_async(Priority.LOW, file_transfer.cancellable);
yield os.close_async(Priority.LOW, file_transfer.cancellable);
file_transfer.path = file.get_basename();
file_transfer.input_stream = yield file.read_async();


@@ -46,7 +46,7 @@ public class Dino.HistorySync {
}
public void update_latest_db_range(Account account, Xmpp.MessageStanza message_stanza) {
Jid mam_server = stream_interactor.get_module(MucManager.IDENTITY).might_be_groupchat(message_stanza.from, account) ? message_stanza.from.bare_jid : account.bare_jid;
Jid mam_server = stream_interactor.get_module(MucManager.IDENTITY).might_be_groupchat(message_stanza.from.bare_jid, account) ? message_stanza.from.bare_jid : account.bare_jid;
if (!current_catchup_id.has_key(account) || !current_catchup_id[account].has_key(mam_server)) return;
@@ -77,8 +77,6 @@ public class Dino.HistorySync {
if (!stanzas.has_key(mam_flag.query_id)) stanzas[mam_flag.query_id] = new ArrayList<Xmpp.MessageStanza>();
stanzas[mam_flag.query_id].add(message_stanza);
print(@"[$(message_stanza.from)] qid $(mam_flag.query_id) time $(mam_flag.server_time) $(mam_flag.mam_id) $(message_stanza.body ?? "[none]")\n");
}
private void on_unprocessed_message(Account account, XmppStream stream, MessageStanza message) {
@@ -104,7 +102,7 @@ public class Dino.HistorySync {
// Check if this is the target message
string? query_id = message.stanza.get_deep_attribute(mam_flag.ns_ver + ":result", mam_flag.ns_ver + ":queryid");
if (query_id != null && id == catchup_until_id[account]) {
debug("MAM: [%s] Hitted range (id) %s", account.bare_jid.to_string(), id);
debug("[%s] Hitted range (id) %s", account.bare_jid.to_string(), id);
hitted_range[query_id] = -2;
}
}
@@ -121,7 +119,7 @@
}
public async void fetch_everything(Account account, Jid mam_server, DateTime until_earliest_time = new DateTime.from_unix_utc(0)) {
print(@"Fetch everything for $(mam_server) %s\n".printf(until_earliest_time != null ? @"(until $until_earliest_time)" : ""));
debug("Fetch everything for %s %s", mam_server.to_string(), until_earliest_time != null ? @"(until $until_earliest_time)" : "");
RowOption latest_row_opt = db.mam_catchup.select()
.with(db.mam_catchup.account_id, "=", account.id)
.with(db.mam_catchup.server_jid, "=", mam_server.to_string())
@@ -139,15 +137,12 @@
}
// Set the previous and current row
print(@"$(new_row == null) $(latest_row == null)\n");
Row? previous_row = null;
Row? current_row = null;
if (new_row != null) {
print(@"Fetch everything $(mam_server) a\n");
current_row = new_row;
previous_row = latest_row;
} else if (latest_row != null) {
print(@"Fetch everything $(mam_server) b\n");
current_row = latest_row;
RowOption previous_row_opt = db.mam_catchup.select()
.with(db.mam_catchup.account_id, "=", account.id)
@@ -159,12 +154,11 @@
previous_row = previous_row_opt.is_present() ? previous_row_opt.inner : null;
}
print(@"Fetch everything $(mam_server) c $(current_row == null) $(previous_row == null)\n");
// Fetch messages between two db ranges and merge them
while (current_row != null && previous_row != null) {
if (current_row[db.mam_catchup.from_end]) return;
print("FETCH BETWEEN RANGES\n");
debug("[%s] Fetching between ranges %s - %s", mam_server.to_string(), previous_row[db.mam_catchup.to_time].to_string(), current_row[db.mam_catchup.from_time].to_string());
current_row = yield fetch_between_ranges(account, mam_server, previous_row, current_row);
if (current_row == null) return;
@@ -184,13 +178,12 @@
// For now, don't query if we are within a week of until_earliest_time
if (until_earliest_time != null &&
current_row[db.mam_catchup.from_time] > until_earliest_time.add(-TimeSpan.DAY * 7).to_unix()) return;
print("FETCH BEFORE RANGE\n");
yield fetch_before_range(account, mam_server, current_row, until_earliest_time);
}
// Fetches the latest page (up to previous db row). Extends the previous db row if it was reached, creates a new row otherwise.
public async Row? fetch_latest_page(Account account, Jid mam_server, Row? latest_row, DateTime? until_earliest_time) {
debug("MAM: [%s | %s] Fetching latest page", mam_server.to_string(), mam_server.to_string());
debug("[%s | %s] Fetching latest page", account.bare_jid.to_string(), mam_server.to_string());
int latest_row_id = -1;
DateTime latest_message_time = until_earliest_time;
@@ -199,7 +192,6 @@
if (latest_row != null) {
latest_row_id = latest_row[db.mam_catchup.id];
latest_message_time = (new DateTime.from_unix_utc(latest_row[db.mam_catchup.to_time])).add_minutes(-5);
print(@"latest msg time $latest_message_time\n");
latest_message_id = latest_row[db.mam_catchup.to_id];
// Make sure we only fetch to until_earliest_time if latest_message_time is further back
@@ -213,25 +205,27 @@
PageRequestResult page_result = yield get_mam_page(account, query_params, null);
if (page_result.page_result == PageResult.Error || page_result.page_result == PageResult.Duplicate) {
debug("MAM [%s | %s] Failed fetching latest page %s", mam_server.to_string(), mam_server.to_string(), page_result.page_result.to_string());
if (page_result.page_result == PageResult.Duplicate) {
// No new messages
return null;
}
print(@"MAM result: $(page_result.page_result))\n");
if (page_result.page_result == PageResult.Error) {
debug("[%s | %s] Failed fetching latest page %s", mam_server.to_string(), mam_server.to_string(), page_result.page_result.to_string());
return null;
}
// Catchup finished within first page. Update latest db entry.
if (page_result.page_result in new PageResult[] { PageResult.TargetReached, PageResult.NoMoreMessages } && latest_row_id != -1) {
if (page_result.stanzas == null || page_result.stanzas.is_empty) return null;
string first_mam_id = page_result.query_result.first;
long first_mam_time = (long) mam_times[account][first_mam_id].to_unix();
string latest_mam_id = page_result.query_result.last;
long latest_mam_time = (long) mam_times[account][latest_mam_id].to_unix();
print(@"Updating $mam_server to $first_mam_time, $first_mam_id\n");
var query = db.mam_catchup.update()
.with(db.mam_catchup.id, "=", latest_row_id)
.set(db.mam_catchup.to_time, first_mam_time)
.set(db.mam_catchup.to_id, first_mam_id);
.set(db.mam_catchup.to_time, latest_mam_time)
.set(db.mam_catchup.to_id, latest_mam_id);
if (page_result.page_result == PageResult.NoMoreMessages) {
// If the server doesn't have more messages, store that this range is at its end.
@@ -242,18 +236,17 @@
}
if (page_result.query_result.first == null || page_result.query_result.last == null) {
print(@"from/to id null\n");
return null;
}
// Either we need to fetch more pages or this is the first db entry ever
debug("MAM: [%s | %s] Creating new db range for latest page", mam_server.to_string(), mam_server.to_string());
debug("[%s | %s] Creating new db range for latest page", mam_server.to_string(), mam_server.to_string());
string from_id = page_result.query_result.first;
string to_id = page_result.query_result.last;
if (!mam_times[account].has_key(from_id) || !mam_times[account].has_key(to_id)) {
print(@"Missing from/to id $from_id $to_id\n");
debug("Missing from/to id %s %s", from_id, to_id);
return null;
}
@@ -280,17 +273,16 @@
int later_range_id = (int) later_range[db.mam_catchup.id];
DateTime earliest_time = new DateTime.from_unix_utc(earlier_range[db.mam_catchup.to_time]);
DateTime latest_time = new DateTime.from_unix_utc(later_range[db.mam_catchup.from_time]);
debug("MAM [%s | %s] Fetching between %s (%s) and %s (%s)", mam_server.to_string(), mam_server.to_string(), earliest_time.to_string(), earlier_range[db.mam_catchup.to_id], latest_time.to_string(), later_range[db.mam_catchup.from_id]);
debug("[%s | %s] Fetching between %s (%s) and %s (%s)", account.bare_jid.to_string(), mam_server.to_string(), earliest_time.to_string(), earlier_range[db.mam_catchup.to_id], latest_time.to_string(), later_range[db.mam_catchup.from_id]);
var query_params = new Xmpp.MessageArchiveManagement.V2.MamQueryParams.query_between(mam_server,
earliest_time, earlier_range[db.mam_catchup.to_id],
latest_time, later_range[db.mam_catchup.from_id]);
print("fetch between ranges\n");
PageRequestResult page_result = yield fetch_query(account, query_params, later_range_id);
print(@"page result null? $(page_result == null)\n");
if (page_result.page_result == PageResult.TargetReached) {
debug("MAM [%s | %s] Merging range %i into %i", mam_server.to_string(), mam_server.to_string(), earlier_range[db.mam_catchup.id], later_range_id);
debug("[%s | %s] Merging range %i into %i", account.bare_jid.to_string(), mam_server.to_string(), earlier_range[db.mam_catchup.id], later_range_id);
// Merge earlier range into later one.
db.mam_catchup.update()
.with(db.mam_catchup.id, "=", later_range_id)
@@ -311,6 +303,7 @@
private async void fetch_before_range(Account account, Jid mam_server, Row range, DateTime? until_earliest_time) {
DateTime latest_time = new DateTime.from_unix_utc(range[db.mam_catchup.from_time]);
string latest_id = range[db.mam_catchup.from_id];
debug("[%s | %s] Fetching before range < %s, %s", account.bare_jid.to_string(), mam_server.to_string(), latest_time.to_string(), latest_id);
Xmpp.MessageArchiveManagement.V2.MamQueryParams query_params;
if (until_earliest_time == null) {
@@ -330,32 +323,30 @@
* @return The last PageRequestResult result
**/
private async PageRequestResult fetch_query(Account account, Xmpp.MessageArchiveManagement.V2.MamQueryParams query_params, int db_id) {
print("fetch query\n");
debug("[%s | %s] Fetch query %s - %s", account.bare_jid.to_string(), query_params.mam_server.to_string(), query_params.start != null ? query_params.start.to_string() : "", query_params.end != null ? query_params.end.to_string() : "");
PageRequestResult? page_result = null;
do {
page_result = yield get_mam_page(account, query_params, page_result);
print(@"page result $(page_result.page_result) $(page_result.stanzas == null)\n");
debug("Page result %s %b", page_result.page_result.to_string(), page_result.stanzas == null);
if (page_result.page_result == PageResult.Error || page_result.stanzas == null) return page_result;
string last_mam_id = page_result.query_result.last;
long last_mam_time = (long)mam_times[account][last_mam_id].to_unix();
string earliest_mam_id = page_result.query_result.first;
long earliest_mam_time = (long)mam_times[account][earliest_mam_id].to_unix();
print(@"Updating $(query_params.mam_server) to $last_mam_time, $last_mam_id\n");
debug("Updating %s to %s, %s", query_params.mam_server.to_string(), earliest_mam_time.to_string(), earliest_mam_id);
var query = db.mam_catchup.update()
.with(db.mam_catchup.id, "=", db_id)
.set(db.mam_catchup.from_time, last_mam_time)
.set(db.mam_catchup.from_id, last_mam_id);
.set(db.mam_catchup.from_time, earliest_mam_time)
.set(db.mam_catchup.from_id, earliest_mam_id);
if (page_result.page_result == PageResult.NoMoreMessages) {
// If the server doesn't have more messages, store that this range is at its end.
print("no more message\n");
query.set(db.mam_catchup.from_end, true);
}
query.perform();
} while (page_result.page_result == PageResult.MorePagesAvailable);
print(@"page result 2 $(page_result.page_result)\n");
return page_result;
}
@@ -363,7 +354,7 @@
MorePagesAvailable,
TargetReached,
NoMoreMessages,
Duplicate, // TODO additional boolean
Duplicate,
Error
}
@@ -378,14 +369,13 @@
} else {
query_result = yield Xmpp.MessageArchiveManagement.V2.page_through_results(stream, query_params, prev_page_result.query_result);
}
return yield process_query_result(account, query_result, query_params.query_id, query_params.start_id);
return yield process_query_result(account, query_params, query_result);
}
private async PageRequestResult process_query_result(Account account, Xmpp.MessageArchiveManagement.QueryResult query_result, string query_id, string? after_id) {
private async PageRequestResult process_query_result(Account account, Xmpp.MessageArchiveManagement.V2.MamQueryParams query_params, Xmpp.MessageArchiveManagement.QueryResult query_result) {
PageResult page_result = PageResult.MorePagesAvailable;
if (query_result.malformed || query_result.error) {
print(@"$(query_result.malformed) $(query_result.error)\n");
page_result = PageResult.Error;
}
@@ -402,49 +392,71 @@
string selection = null;
string[] selection_args = {};
// Check the server id of all returned messages. Check if we've hit our target (from_id) or got a duplicate.
string query_id = query_params.query_id;
string? after_id = query_params.start_id;
if (stanzas.has_key(query_id) && !stanzas[query_id].is_empty) {
print(@"$(stanzas.has_key(query_id)) $(!stanzas[query_id].is_empty) looking for $(after_id ?? "")\n");
// Check if we reached our target (from_id)
foreach (Xmpp.MessageStanza message in stanzas[query_id]) {
Xmpp.MessageArchiveManagement.MessageFlag? mam_message_flag = Xmpp.MessageArchiveManagement.MessageFlag.get_flag(message);
if (mam_message_flag != null && mam_message_flag.mam_id != null) {
if (after_id != null && mam_message_flag.mam_id == after_id) {
// Successfully fetched the whole range
page_result = PageResult.TargetReached;
var ret = new PageRequestResult(PageResult.TargetReached, query_result, stanzas[query_id]);
send_messages_back_into_pipeline(account, query_id);
return ret;
}
if (selection != null) selection += " OR ";
selection = @"$(db.message.server_id) = ?";
}
}
if (hitted_range.has_key(query_id)) {
if (hitted_range.has_key(query_id) && hitted_range[query_id] == -2) {
// Message got filtered out by xmpp-vala, but the range fetch was successful nevertheless
page_result = PageResult.TargetReached;
var ret = new PageRequestResult(PageResult.TargetReached, query_result, stanzas[query_id]);
send_messages_back_into_pipeline(account, query_id);
return ret;
}
int64 duplicates_found = db.message.select().where(selection, selection_args).count();
if (duplicates_found > 0) {
// Check for duplicates. Go through all messages and build a db query.
foreach (Xmpp.MessageStanza message in stanzas[query_id]) {
Xmpp.MessageArchiveManagement.MessageFlag? mam_message_flag = Xmpp.MessageArchiveManagement.MessageFlag.get_flag(message);
if (mam_message_flag != null && mam_message_flag.mam_id != null) {
if (selection == null) {
selection = @"$(db.message.server_id) = ?";
} else {
selection += @" OR $(db.message.server_id) = ?";
}
selection_args += mam_message_flag.mam_id;
}
}
var duplicates_qry = db.message.select()
.with(db.message.account_id, "=", account.id)
.where(selection, selection_args);
// We don't want messages from different MAM servers to interfere with each other.
if (!query_params.mam_server.equals_bare(account.bare_jid)) {
duplicates_qry.with(db.message.counterpart_id, "=", db.get_jid_id(query_params.mam_server));
} else {
duplicates_qry.with(db.message.type_, "=", Message.Type.CHAT);
}
var duplicates_count = duplicates_qry.count();
if (duplicates_count > 0) {
// We got a duplicate although we thought we still had to catch up.
// There was a server bug where prosody would send all messages if it didn't know the after ID that was given
page_result = PageResult.Duplicate;
}
}
var res = new PageRequestResult() { stanzas=stanzas[query_id], page_result=page_result, query_result=query_result };
var res = new PageRequestResult(page_result, query_result, stanzas.has_key(query_id) ? stanzas[query_id] : null);
send_messages_back_into_pipeline(account, query_id);
return res;
}
private void send_messages_back_into_pipeline(Account account, string query_id) {
print(@"send_messages_back_into_pipeline $query_id\n");
if (!stanzas.has_key(query_id)) return;
foreach (Xmpp.MessageStanza message in stanzas[query_id]) {
stream_interactor.get_module(MessageProcessor.IDENTITY).run_pipeline_announce.begin(account, message);
}
stanzas.unset(query_id);
print(@"send_messages_back_into_pipeline $query_id done\n");
}
private void on_account_added(Account account) {
@@ -458,7 +470,7 @@
current_catchup_id[account] = new HashMap<Jid, int>(Jid.hash_func, Jid.equals_func);
stream_bak = stream;
debug("MAM: [%s] MAM available", account.bare_jid.to_string());
debug("[%s] MAM available", account.bare_jid.to_string());
fetch_everything.begin(account, account.bare_jid);
});
@@ -490,14 +502,14 @@
if (to_delete.contains(range1)) continue;
foreach (MamRange range2 in ranges[server_jid]) {
print(@"$(account.bare_jid) | $(server_jid) | $(range1.from_time) - $(range1.to_time) vs $(range2.from_time) - $(range2.to_time)\n");
debug("[%s | %s] | %s - %s vs %s - %s", account.bare_jid.to_string(), server_jid.to_string(), range1.from_time.to_string(), range1.to_time.to_string(), range2.from_time.to_string(), range2.to_time.to_string());
if (range1 == range2 || to_delete.contains(range2)) continue;
// Check if range2 is a subset of range1
// range1: #####################
// range2: ######
if (range1.from_time <= range2.from_time && range1.to_time >= range2.to_time) {
critical(@"MAM: Removing db range which is a subset of another one");
warning("Removing db range which is a subset of %li-%li", range1.from_time, range1.to_time);
to_delete.add(range2);
continue;
}
@@ -505,37 +517,24 @@
// Check if range2 is an extension of range1 (towards earlier)
// range1: #####################
// range2: ###############
if (range1.from_time <= range2.from_time <= range1.to_time && range1.to_time < range2.to_time) {
critical(@"MAM: Removing db range that overlapped another one (towards earlier)");
if (range1.from_time <= range2.to_time <= range1.to_time && range2.from_time <= range1.from_time) {
warning("Removing db range that overlapped %li-%li (towards earlier)", range1.from_time, range1.to_time);
db.mam_catchup.update()
.with(db.mam_catchup.id, "=", range1.id)
.set(db.mam_catchup.from_id, range2.to_id)
.set(db.mam_catchup.from_time, range2.to_time)
.set(db.mam_catchup.from_id, range2.from_id)
.set(db.mam_catchup.from_time, range2.from_time)
.set(db.mam_catchup.from_end, range2.from_end)
.perform();
to_delete.add(range2);
continue;
}
// Check if range2 is an extension of range1 (towards more current)
// range1: #####################
// range2: ###############
if (range1.from_time <= range2.from_time <= range1.to_time && range1.to_time < range2.to_time) {
critical(@"MAM: Removing db range that overlapped another one (towards more current)");
db.mam_catchup.update()
.with(db.mam_catchup.id, "=", range1.id)
.set(db.mam_catchup.to_id, range2.to_id)
.set(db.mam_catchup.to_time, range2.to_time)
.perform();
to_delete.add(range2);
continue;
}
}
}
}
foreach (MamRange row in to_delete) {
db.mam_catchup.delete().with(db.mam_catchup.id, "=", row.id).perform();
warning("Removing db range %s %li-%li", row.server_jid.to_string(), row.from_time, row.to_time);
}
}
@@ -553,5 +552,11 @@
public Gee.List<MessageStanza> stanzas { get; set; }
public PageResult page_result { get; set; }
public Xmpp.MessageArchiveManagement.QueryResult query_result { get; set; }
public PageRequestResult(PageResult page_result, Xmpp.MessageArchiveManagement.QueryResult query_result, Gee.List<MessageStanza>? stanzas) {
this.page_result = page_result;
this.query_result = query_result;
this.stanzas = stanzas;
}
}
}


@@ -64,7 +64,7 @@ public class JingleFileProvider : FileProvider, Object {
public JingleFileProvider(StreamInteractor stream_interactor) {
this.stream_interactor = stream_interactor;
stream_interactor.stream_negotiated.connect(on_stream_negotiated);
stream_interactor.account_added.connect(on_account_added);
}
public FileMeta get_file_meta(FileTransfer file_transfer) throws FileReceiveError {
@@ -114,15 +114,14 @@
return 1;
}
private void on_stream_negotiated(Account account, XmppStream stream) {
private void on_account_added(Account account) {
XmppStream stream = stream_interactor.get_stream(account);
stream_interactor.module_manager.get_module(account, Xmpp.Xep.JingleFileTransfer.Module.IDENTITY).file_incoming.connect((stream, jingle_file_transfer) => {
Conversation? conversation = stream_interactor.get_module(ConversationManager.IDENTITY).get_conversation(jingle_file_transfer.peer.bare_jid, account);
if (conversation == null) {
// TODO(hrxi): What to do?
return;
}
string id = random_uuid();
if (conversation == null) return;
string id = random_uuid();
file_transfers[id] = jingle_file_transfer;
FileMeta file_meta = new FileMeta();


@@ -72,7 +72,6 @@ public class MucManager : StreamInteractionModule, Object {
bool receive_history = true;
EntityInfo entity_info = stream_interactor.get_module(EntityInfo.IDENTITY);
bool can_do_mam = yield entity_info.has_feature(account, jid, Xmpp.MessageArchiveManagement.NS_URI_2);
print(@"$(jid) $can_do_mam\n");
if (can_do_mam) {
receive_history = false;
history_since = null;


@@ -261,7 +261,7 @@ public class Dino.Reactions : StreamInteractionModule, Object {
}
// Store reaction infos for later processing after we got the message
print(@"Got reaction for $message_id but dont have message yet $(db.get_jid_id(stanza.from.bare_jid))\n");
debug("Got reaction for %s but dont have message yet %s", message_id, db.get_jid_id(stanza.from.bare_jid).to_string());
if (!reaction_infos.has_key(message_id)) {
reaction_infos[message_id] = new ArrayList<ReactionInfo>();
}
@@ -297,9 +297,15 @@
}
if (reaction_info == null) return;
reaction_info_list.remove(reaction_info);
if (reaction_info_list.is_empty) reaction_infos.unset(message.stanza_id);
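// Group chat reactions reference the server-assigned stanza id, so the
// pending list is keyed by server_id for those.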
if (reaction_info_list.is_empty) {
if (conversation.type_ == Conversation.Type.GROUPCHAT) {
reaction_infos.unset(message.server_id);
} else {
reaction_infos.unset(message.stanza_id);
}
}
print(@"Got message for reaction\n");
debug("Got message for reaction %s", message.stanza_id);
process_reaction_for_message(message.id, reaction_info);
}
@@ -364,7 +370,7 @@
ReactionsTime reactions_time = null;
if (stanza.type_ == MessageStanza.TYPE_GROUPCHAT) {
reactions_time = get_muc_user_reactions(account, content_item_id, occupant_id, real_jid);
} else if (stanza.type_ == MessageStanza.TYPE_CHAT) {
} else {
reactions_time = get_chat_user_reactions(account, content_item_id, from_jid);
}
@@ -374,10 +380,10 @@
}
// Save reactions
if (stanza.type_ == MessageStanza.TYPE_CHAT) {
save_chat_reactions(account, from_jid, content_item_id, reaction_time_long, reactions);
} else if (stanza.type_ == MessageStanza.TYPE_GROUPCHAT) {
if (stanza.type_ == MessageStanza.TYPE_GROUPCHAT) {
save_muc_reactions(account, content_item_id, from_jid, occupant_id, real_jid, reaction_time_long, reactions);
} else {
save_chat_reactions(account, from_jid, content_item_id, reaction_time_long, reactions);
}
// Notify about reaction changes
@@ -387,6 +393,8 @@
if (stanza.type_ == MessageStanza.TYPE_GROUPCHAT &&
signal_jid.equals(stream_interactor.get_module(MucManager.IDENTITY).get_own_jid(from_jid, account))) {
signal_jid = account.bare_jid;
} else if (stanza.type_ == MessageStanza.TYPE_CHAT) {
signal_jid = signal_jid.bare_jid;
}
foreach (string current_reaction in current_reactions) {
@@ -400,16 +408,14 @@
}
}
print("reactions were: ");
debug("reactions were: ");
foreach (string reac in current_reactions) {
print(reac + " ");
debug(reac);
}
print("\n");
print("reactions new : ");
debug("reactions new : ");
foreach (string reac in reactions) {
print(reac + " ");
debug(reac);
}
print("\n");
}
private void save_chat_reactions(Account account, Jid jid, int content_item_id, long reaction_time, Gee.List<string> reactions) {


@@ -23,7 +23,6 @@ protected class ConferenceList {
this.stream_interactor = stream_interactor;
bookmarks_updated_handler_id = stream_interactor.get_module(MucManager.IDENTITY).bookmarks_updated.connect((account, conferences) => {
print(@"$(this == null) $(lists == null)\n");
lists[account] = conferences;
refresh_conferences();
});


@@ -453,7 +453,7 @@ public bool use_tooltips() {
}
public static void menu_button_set_icon_with_size(MenuButton menu_button, string icon_name, int pixel_size) {
#if GTK_4_6
#if GTK_4_6 && VALA_0_52
menu_button.set_child(new Image.from_icon_name(icon_name) { pixel_size=pixel_size });
#else
menu_button.set_icon_name(icon_name);


@@ -1,11 +1,4 @@
set(HTTP_FILES_DEFINITIONS)
if(USE_SOUP3)
set(Soup Soup3)
set(HTTP_FILES_DEFINITIONS ${HTTP_FILES_DEFINITIONS} SOUP_3)
else()
set(Soup Soup2)
endif()
include(SoupVersion)
find_packages(HTTP_FILES_PACKAGES REQUIRED
Gee
GLib
@@ -15,6 +8,12 @@ find_packages(HTTP_FILES_PACKAGES REQUIRED
${Soup}
)
set(HTTP_FILES_DEFINITIONS)
if(${Soup}_VERSION VERSION_GREATER_EQUAL "3.0")
set(HTTP_FILES_DEFINITIONS ${HTTP_FILES_DEFINITIONS} SOUP_3_0)
endif()
vala_precompile(HTTP_FILES_VALA_C
SOURCES
src/file_provider.vala


@@ -46,7 +46,7 @@ public class FileProvider : Dino.FileProvider, Object {
}
}
private class LimitInputStream : InputStream {
private class LimitInputStream : InputStream, PollableInputStream {
InputStream inner;
int64 remaining_size;
@@ -55,6 +55,20 @@ public class FileProvider : Dino.FileProvider, Object {
this.remaining_size = max_size;
}
public bool can_poll() {
return inner is PollableInputStream && ((PollableInputStream)inner).can_poll();
}
public PollableSource create_source(Cancellable? cancellable = null) {
if (!can_poll()) throw new IOError.NOT_SUPPORTED("Stream is not pollable");
return ((PollableInputStream)inner).create_source(cancellable);
}
public bool is_readable() {
if (!can_poll()) throw new IOError.NOT_SUPPORTED("Stream is not pollable");
return ((PollableInputStream)inner).is_readable();
}
private ssize_t check_limit(ssize_t read) throws IOError {
this.remaining_size -= read;
if (remaining_size < 0) throw new IOError.FAILED("Stream length exceeded limit");
@@ -69,6 +83,11 @@
return check_limit(yield inner.read_async(buffer, io_priority, cancellable));
}
public ssize_t read_nonblocking_fn(uint8[] buffer) throws Error {
if (!is_readable()) throw new IOError.WOULD_BLOCK("Stream is not readable");
return read(buffer);
}
public override bool close(Cancellable? cancellable = null) throws IOError {
return inner.close(cancellable);
}
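The point of the interface plumbing above: the limiter now advertises pollability exactly when the wrapped stream has it, instead of hiding it behind a plain InputStream. A minimal usage sketch, with http_body_stream and max_bytes as hypothetical stand-ins:

InputStream limited = new LimitInputStream(http_body_stream, max_bytes);
var pollable = limited as PollableInputStream;
if (pollable != null && pollable.can_poll() && pollable.is_readable()) {
    uint8[] buf = new uint8[4096];
    try {
        // Returns immediately; the size limit is still enforced via read().
        ssize_t n = pollable.read_nonblocking(buf);
    } catch (Error e) {
        // e.g. IOError.WOULD_BLOCK if readability vanished in between
    }
}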
@@ -101,7 +120,7 @@
head_message.request_headers.append("Accept-Encoding", "identity");
try {
#if SOUP_3
#if SOUP_3_0
yield session.send_async(head_message, GLib.Priority.LOW, null);
#else
yield session.send_async(head_message, null);
@@ -136,7 +155,7 @@
var get_message = new Soup.Message("GET", http_receive_data.url);
try {
#if SOUP_3
#if SOUP_3_0
InputStream stream = yield session.send_async(get_message, GLib.Priority.LOW, file_transfer.cancellable);
#else
InputStream stream = yield session.send_async(get_message, file_transfer.cancellable);


@@ -73,7 +73,7 @@ public class HttpFileSender : FileSender, Object {
}
}
#if !SOUP_3
#if !SOUP_3_0
private static void transfer_more_bytes(InputStream stream, Soup.MessageBody body) {
uint8[] bytes = new uint8[4096];
ssize_t read = stream.read(bytes);
@@ -93,7 +93,7 @@
var session = new Soup.Session();
session.user_agent = @"Dino/$(Dino.get_short_version()) ";
var put_message = new Soup.Message("PUT", file_send_data.url_up);
#if SOUP_3
#if SOUP_3_0
put_message.set_request_body(file_meta.mime_type, file_transfer.input_stream, (ssize_t) file_meta.size);
#else
put_message.request_headers.set_content_type(file_meta.mime_type, null);
@@ -106,7 +106,7 @@
put_message.request_headers.append(entry.key, entry.value);
}
try {
#if SOUP_3
#if SOUP_3_0
yield session.send_async(put_message, GLib.Priority.LOW, file_transfer.cancellable);
#else
yield session.send_async(put_message, file_transfer.cancellable);


@@ -76,7 +76,7 @@ public class Handler {
int err = 0;
X509.PrivateKey private_key = X509.PrivateKey.create();
err = private_key.generate(PKAlgorithm.RSA, 2048);
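// ECDSA with bits=256 selects the NIST P-256 curve in GnuTLS, and generating
// such a key is far cheaper than a 2048-bit RSA key.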
err = private_key.generate(PKAlgorithm.ECDSA, 256);
throw_if_error(err);
var start_time = new DateTime.now_local().add_days(1);


@@ -221,7 +221,8 @@ namespace GnuTLS {
public enum PKAlgorithm {
UNKNOWN,
RSA,
DSA;
DSA,
ECDSA;
}
[CCode (cname = "gnutls_digest_algorithm_t", cprefix = "GNUTLS_DIG_", has_type_id = false)]


@@ -7,15 +7,6 @@ find_packages(ENGINE_PACKAGES REQUIRED
ICU
)
set(ENGINE_DEFINITIONS "")
find_package(GIO)
if(GIO_VERSION VERSION_GREATER "2.60")
message(STATUS "ALPN support enabled")
set(ENGINE_DEFINITIONS ALPN_SUPPORT)
else()
message(STATUS "No ALPN support, needs GIO >= 2.60")
endif()
set(ENGINE_EXTRA_OPTIONS ${MAIN_EXTRA_OPTIONS} --vapidir=${CMAKE_CURRENT_SOURCE_DIR}/vapi)
vala_precompile(ENGINE_VALA_C
@@ -154,8 +145,6 @@ GENERATE_HEADER
xmpp-vala
CUSTOM_VAPIS
"${CMAKE_CURRENT_SOURCE_DIR}/src/glib_fixes.vapi"
DEFINITIONS
${ENGINE_DEFINITIONS}
OPTIONS
${ENGINE_EXTRA_OPTIONS}
)


@@ -19,7 +19,7 @@ public class Xmpp.DirectTlsXmppStream : TlsXmppStream {
debug("Connecting to %s:%i (tls)", host, port);
IOStream? io_stream = yield client.connect_to_host_async(host, port);
TlsConnection tls_connection = TlsClientConnection.new(io_stream, new NetworkAddress(remote_name.to_string(), port));
#if ALPN_SUPPORT
#if GLIB_2_60
tls_connection.set_advertised_protocols(ADVERTISED_PROTOCOLS);
#endif
tls_connection.accept_certificate.connect(on_invalid_certificate);
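Background: valac predefines GLIB_2_60 (and the other GLIB_2_x symbols) based on its --target-glib setting, so the Vala preprocessor can gate ALPN support directly; this is what lets the hand-rolled GIO version probe be dropped from the engine's CMakeLists above.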


@@ -79,21 +79,18 @@ public class Module : XmppStreamModule {
// Build and send query
Iq.Stanza iq = new Iq.Stanza.set(query_node) { to=mam_server };
print(@"OUT:\n$(iq.stanza.to_string())\n");
Iq.Stanza result_iq = yield stream.get_module(Iq.Module.IDENTITY).send_iq_async(stream, iq);
print(result_iq.stanza.to_string() + "\n");
// Parse the response IQ into a QueryResult.
StanzaNode? fin_node = result_iq.stanza.get_subnode("fin", ns);
if (fin_node == null) { print(@"$ns a1\n"); res.malformed = true; return res; }
if (fin_node == null) { res.malformed = true; return res; }
StanzaNode? rsm_node = fin_node.get_subnode("set", Xmpp.ResultSetManagement.NS_URI);
if (rsm_node == null) { print("a2\n"); res.malformed = true; return res; }
if (rsm_node == null) { res.malformed = true; return res; }
res.first = rsm_node.get_deep_string_content("first");
res.last = rsm_node.get_deep_string_content("last");
if ((res.first == null) != (res.last == null)) { print("a3\n"); res.malformed = true; }
if ((res.first == null) != (res.last == null)) { res.malformed = true; return res; }
res.complete = fin_node.get_attribute_bool("complete", false, ns);
return res;


@@ -105,6 +105,7 @@ public class Module : BookmarksProvider, XmppStreamModule {
conference.name = conference_node.get_attribute("name", NS_URI);
conference.autojoin = conference_node.get_attribute_bool("autojoin", false, NS_URI);
conference.nick = conference_node.get_deep_string_content("nick");
conference.password = conference_node.get_deep_string_content("password");
return conference;
}