chore: fix PR reviews

pull/3080/head
Audric Ackermann 2 months ago
parent faa24ce9a6
commit 51c307af25

@@ -77,7 +77,7 @@ const getCategories = (): Array<{ id: SessionSettingCategory; title: string }> =
       title: window.i18n('recoveryPhrase'),
     },
     {
-      id: 'ClearData' as const,
+      id: 'clearData' as const,
       title: window.i18n('clearDataSettingsTitle'),
     },
   ];
@@ -93,7 +93,7 @@ const LeftPaneSettingsCategoryRow = (props: {
   const dataTestId = `${title.toLowerCase().replace(' ', '-')}-settings-menu-item`;
-  const isClearData = id === 'ClearData';
+  const isClearData = id === 'clearData';
   return (
     <StyledSettingsListItem
@@ -111,7 +111,7 @@ const LeftPaneSettingsCategoryRow = (props: {
      case 'recoveryPhrase':
        dispatch(recoveryPhraseModal({}));
        break;
-     case 'ClearData':
+     case 'clearData':
        dispatch(updateDeleteAccountModal({}));
        break;
      default:

@@ -117,7 +117,7 @@ const SettingInCategory = (props: {
      return <SettingsCategoryPermissions />;
    // these three down there have no options, they are just a button
-   case 'ClearData':
+   case 'clearData':
    case 'messageRequests':
    case 'recoveryPhrase':
    default:

@@ -44,7 +44,7 @@ export const SettingsHeader = (props: Props) => {
    case 'privacy':
      categoryTitle = window.i18n('privacySettingsTitle');
      break;
-   case 'ClearData':
+   case 'clearData':
    case 'messageRequests':
    case 'recoveryPhrase':
      throw new Error(`no header for should be tried to be rendered for "${category}"`);
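The rename only stays safe if every `case` keeps matching the `SessionSettingCategory` union. A minimal sketch of an exhaustiveness guard that would turn a future mismatch into a compile error; the trimmed-down union and the `assertNever` helper are illustrative stand-ins, not part of this commit:

// Self-contained sketch: a trimmed-down stand-in for SessionSettingCategory.
type ExampleCategory = 'privacy' | 'messageRequests' | 'recoveryPhrase' | 'clearData';

// Hypothetical helper: only callable once every union member has been handled.
function assertNever(value: never): never {
  throw new Error(`Unhandled category: ${JSON.stringify(value)}`);
}

function headerTitleFor(category: ExampleCategory): string | null {
  switch (category) {
    case 'privacy':
      return 'Privacy';
    // these categories render no header at all
    case 'messageRequests':
    case 'recoveryPhrase':
    case 'clearData':
      return null;
    default:
      // If a member is renamed (say 'clearData' back to 'ClearData') but a case is
      // left behind, `category` is no longer `never` here and this line fails to compile.
      return assertNever(category);
  }
}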

@@ -12,6 +12,7 @@ import { allowOnlyOneAtATime } from '../../utils/Promise';
 import { APPLICATION_JSON } from '../../../types/MIME';
 import { isLinux } from '../../../OS';
 import { Snode } from '../../../data/data';
+import { GetServicesNodesFromSeedRequest } from '../snode_api/SnodeRequestTypes';
 
 /**
  * Fetch all snodes from seed nodes.
@@ -228,25 +229,20 @@ async function getSnodesFromSeedUrl(urlObj: URL): Promise<Array<any>> {
     // we get all active nodes
     window?.log?.info(`getSnodesFromSeedUrl starting with ${urlObj.href}`);
 
-    const params = {
-      active_only: true,
-      // If you are thinking of adding the `limit` field here: don't.
-      // We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
-      // If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
-      fields: {
-        public_ip: true,
-        storage_port: true,
-        pubkey_x25519: true,
-        pubkey_ed25519: true,
-      },
-    };
-
     const endpoint = 'json_rpc';
     const url = `${urlObj.href}${endpoint}`;
-    const body = {
+    const body: GetServicesNodesFromSeedRequest = {
       jsonrpc: '2.0',
       method: 'get_n_service_nodes',
-      params,
+      params: {
+        active_only: true,
+        fields: {
+          public_ip: true,
+          storage_port: true,
+          pubkey_x25519: true,
+          pubkey_ed25519: true,
+        },
+      },
     };
 
     const sslAgent = await getSslAgentForSeedNode(
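Typing the body also turns the "don't add a `limit` field" rule into a compile-time check, thanks to excess-property checking on object literals. A minimal sketch using the names and import path from the hunks above; the commented-out `limit` line is illustrative only:

import { GetServicesNodesFromSeedRequest } from '../snode_api/SnodeRequestTypes';

// Sketch: the annotation makes the request shape (and the "no limit" rule) checked by the compiler.
const exampleBody: GetServicesNodesFromSeedRequest = {
  jsonrpc: '2.0',
  method: 'get_n_service_nodes',
  params: {
    active_only: true,
    // limit: 20, // would be rejected: FetchSnodeListParams declares no `limit` property
    fields: {
      public_ip: true,
      storage_port: true,
      pubkey_x25519: true,
      pubkey_ed25519: true,
    },
  },
};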

@@ -67,22 +67,42 @@ export type OnsResolveSubRequest = {
   };
 };
 
+/**
+ * If you are thinking of adding the `limit` field here: don't.
+ * We fetch the full list because we will remove from every cached swarms the snodes not found in that fresh list.
+ * If a `limit` was set, we would remove a lot of valid snodes from those cached swarms.
+ */
+type FetchSnodeListParams = {
+  active_only: true;
+  fields: {
+    public_ip: true;
+    storage_port: true;
+    pubkey_x25519: true;
+    pubkey_ed25519: true;
+  };
+};
+
+export type GetServicesNodesFromSeedRequest = {
+  method: 'get_n_service_nodes';
+  jsonrpc: '2.0';
+  /**
+   * If you are thinking of adding the `limit` field here: don't.
+   * We fetch the full list because we will remove from every cached swarms the snodes not found in that fresh list.
+   * If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
+   */
+  params: FetchSnodeListParams;
+};
+
 export type GetServiceNodesSubRequest = {
   method: 'oxend_request';
   params: {
     endpoint: 'get_service_nodes';
-    params: {
-      active_only: true;
-      // If you are thinking of adding the `limit` field here: don't.
-      // We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
-      // If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
-      fields: {
-        public_ip: true;
-        storage_port: true;
-        pubkey_x25519: true;
-        pubkey_ed25519: true;
-      };
-    };
+    /**
+     * If you are thinking of adding the `limit` field here: don't.
+     * We fetch the full list because we will remove from every cached swarms the snodes not found in that fresh list.
+     * If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
+     */
+    params: FetchSnodeListParams;
   };
 };
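With both request shapes now pointing at `FetchSnodeListParams`, the literal `true` fields could also be centralised in one constant. A sketch only, assuming the type is exported (the commit keeps it module-private) and a TypeScript version with `satisfies` (4.9+); `SNODE_LIST_PARAMS` is a hypothetical name:

// Hypothetical shared constant; `as const satisfies` keeps the literal `true` types
// while checking the object against FetchSnodeListParams.
export const SNODE_LIST_PARAMS = {
  active_only: true,
  fields: {
    public_ip: true,
    storage_port: true,
    pubkey_x25519: true,
    pubkey_ed25519: true,
  },
} as const satisfies FetchSnodeListParams;

// Both the seed request and the oxend sub-request could then reuse it:
const seedRequest: GetServicesNodesFromSeedRequest = {
  jsonrpc: '2.0',
  method: 'get_n_service_nodes',
  params: SNODE_LIST_PARAMS,
};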

@@ -13,9 +13,6 @@ function buildSnodeListRequests(): Array<GetServiceNodesSubRequest> {
       endpoint: 'get_service_nodes',
       params: {
         active_only: true,
-        // If you are thinking of adding the `limit` field here: don't.
-        // We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
-        // If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
         fields: {
           public_ip: true,
           storage_port: true,

@@ -5,7 +5,7 @@ import { doSnodeBatchRequest } from './batchRequest';
 import { GetNetworkTime } from './getNetworkTime';
 import { SnodeNamespace, SnodeNamespaces } from './namespaces';
-import { TTL_DEFAULT } from '../../constants';
+import { DURATION, TTL_DEFAULT } from '../../constants';
 import { UserUtils } from '../../utils';
 import { sleepFor } from '../../utils/Promise';
 import {
@@ -124,7 +124,7 @@ async function retrieveNextMessages(
   );
   // let exceptions bubble up
   // no retry for this one as this a call we do every few seconds while polling for messages
-  const timeOutMs = 10 * 1000; // yes this is a long timeout for just messages, but 4s timeouts way to often...
+  const timeOutMs = 10 * DURATION.SECONDS; // yes this is a long timeout for just messages, but 4s timeouts way to often...
   const timeoutPromise = async () => sleepFor(timeOutMs);
   const fetchPromise = async () =>
     doSnodeBatchRequest(retrieveRequestsParams, targetNode, timeOutMs, associatedWith);
@@ -166,7 +166,7 @@ async function retrieveNextMessages(
   GetNetworkTime.handleTimestampOffsetFromNetwork('retrieve', bodyFirstResult.t);
 
-  // NOTE: We don't want to sort messages here because the ordering depends on the snode and when it received each messages.
+  // NOTE: We don't want to sort messages here because the ordering depends on the snode and when it received each message.
   // The last_hash for that snode has to be the last one we've received from that same snode, othwerwise we end up fetching the same messages over and over again.
   return results.map((result, index) => ({
     code: result.code,
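The `DURATION` constant is assumed here to express durations in milliseconds (i.e. `DURATION.SECONDS === 1000`), so `10 * DURATION.SECONDS` keeps the same 10 s budget as the old `10 * 1000`. A self-contained sketch of the timeout pattern suggested by the `timeoutPromise`/`fetchPromise` pair above; the helper names and the race are illustrative, not the project's exact implementation:

// Assumed definition, mirroring ../../constants
const DURATION = { SECONDS: 1000 };

const sleepFor = (ms: number) => new Promise<void>(resolve => setTimeout(resolve, ms));

async function raceWithTimeout<T>(doFetch: () => Promise<T>, timeOutMs: number): Promise<T> {
  const timeout: Promise<never> = sleepFor(timeOutMs).then(() => {
    throw new Error(`request timed out after ${timeOutMs}ms`);
  });
  // Whichever settles first wins: the snode response or the timeout error.
  return Promise.race([doFetch(), timeout]);
}

// e.g. raceWithTimeout(() => doSnodeBatchRequest(params, targetNode, timeOutMs, pubkey), 10 * DURATION.SECONDS)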

@@ -1,7 +1,7 @@
 import { isBoolean } from 'lodash';
 import { PayloadAction, createSlice } from '@reduxjs/toolkit';
-import { SettingsKey } from '../../data/settings-key'; // ok: does not import anything else
+import { SettingsKey } from '../../data/settings-key';
 
 const SettingsBoolsKeyTrackedInRedux = [
   SettingsKey.someDeviceOutdatedSyncing,

@@ -12,7 +12,7 @@ export type SessionSettingCategory =
   | 'permissions'
   | 'help'
   | 'recoveryPhrase'
-  | 'ClearData';
+  | 'clearData';
 
 export type PasswordAction = 'set' | 'change' | 'remove' | 'enter';
