chore: fix PR reviews

pull/3080/head
Audric Ackermann 3 weeks ago
parent faa24ce9a6
commit 51c307af25

@ -77,7 +77,7 @@ const getCategories = (): Array<{ id: SessionSettingCategory; title: string }> =
title: window.i18n('recoveryPhrase'),
},
{
id: 'ClearData' as const,
id: 'clearData' as const,
title: window.i18n('clearDataSettingsTitle'),
},
];
@ -93,7 +93,7 @@ const LeftPaneSettingsCategoryRow = (props: {
const dataTestId = `${title.toLowerCase().replace(' ', '-')}-settings-menu-item`;
const isClearData = id === 'ClearData';
const isClearData = id === 'clearData';
return (
<StyledSettingsListItem
@ -111,7 +111,7 @@ const LeftPaneSettingsCategoryRow = (props: {
case 'recoveryPhrase':
dispatch(recoveryPhraseModal({}));
break;
case 'ClearData':
case 'clearData':
dispatch(updateDeleteAccountModal({}));
break;
default:

@ -117,7 +117,7 @@ const SettingInCategory = (props: {
return <SettingsCategoryPermissions />;
// these three down there have no options, they are just a button
case 'ClearData':
case 'clearData':
case 'messageRequests':
case 'recoveryPhrase':
default:

@ -44,7 +44,7 @@ export const SettingsHeader = (props: Props) => {
case 'privacy':
categoryTitle = window.i18n('privacySettingsTitle');
break;
case 'ClearData':
case 'clearData':
case 'messageRequests':
case 'recoveryPhrase':
throw new Error(`no header for should be tried to be rendered for "${category}"`);

@ -12,6 +12,7 @@ import { allowOnlyOneAtATime } from '../../utils/Promise';
import { APPLICATION_JSON } from '../../../types/MIME';
import { isLinux } from '../../../OS';
import { Snode } from '../../../data/data';
import { GetServicesNodesFromSeedRequest } from '../snode_api/SnodeRequestTypes';
/**
* Fetch all snodes from seed nodes.
@ -228,25 +229,20 @@ async function getSnodesFromSeedUrl(urlObj: URL): Promise<Array<any>> {
// we get all active nodes
window?.log?.info(`getSnodesFromSeedUrl starting with ${urlObj.href}`);
const params = {
active_only: true,
// If you are thinking of adding the `limit` field here: don't.
// We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
// If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
fields: {
public_ip: true,
storage_port: true,
pubkey_x25519: true,
pubkey_ed25519: true,
},
};
const endpoint = 'json_rpc';
const url = `${urlObj.href}${endpoint}`;
const body = {
const body: GetServicesNodesFromSeedRequest = {
jsonrpc: '2.0',
method: 'get_n_service_nodes',
params,
params: {
active_only: true,
fields: {
public_ip: true,
storage_port: true,
pubkey_x25519: true,
pubkey_ed25519: true,
},
},
};
const sslAgent = await getSslAgentForSeedNode(

@ -67,22 +67,42 @@ export type OnsResolveSubRequest = {
};
};
/**
* If you are thinking of adding the `limit` field here: don't.
 * We fetch the full list because we will remove from every cached swarm the snodes not found in that fresh list.
* If a `limit` was set, we would remove a lot of valid snodes from those cached swarms.
*/
type FetchSnodeListParams = {
active_only: true;
fields: {
public_ip: true;
storage_port: true;
pubkey_x25519: true;
pubkey_ed25519: true;
};
};
export type GetServicesNodesFromSeedRequest = {
method: 'get_n_service_nodes';
jsonrpc: '2.0';
/**
* If you are thinking of adding the `limit` field here: don't.
 * We fetch the full list because we will remove from every cached swarm the snodes not found in that fresh list.
* If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
*/
params: FetchSnodeListParams;
};
export type GetServiceNodesSubRequest = {
method: 'oxend_request';
params: {
endpoint: 'get_service_nodes';
params: {
active_only: true;
// If you are thinking of adding the `limit` field here: don't.
// We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
// If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
fields: {
public_ip: true;
storage_port: true;
pubkey_x25519: true;
pubkey_ed25519: true;
};
};
/**
* If you are thinking of adding the `limit` field here: don't.
 * We fetch the full list because we will remove from every cached swarm the snodes not found in that fresh list.
* If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
*/
params: FetchSnodeListParams;
};
};

@ -13,9 +13,6 @@ function buildSnodeListRequests(): Array<GetServiceNodesSubRequest> {
endpoint: 'get_service_nodes',
params: {
active_only: true,
// If you are thinking of adding the `limit` field here: don't.
// We fetch the full list because when we retrieve it we also remove from all the swarms we already know, any snode not part of that fetched list.
// If the limit was set, we would remove a lot of valid snodes from the swarms we've already fetched.
fields: {
public_ip: true,
storage_port: true,

@ -5,7 +5,7 @@ import { doSnodeBatchRequest } from './batchRequest';
import { GetNetworkTime } from './getNetworkTime';
import { SnodeNamespace, SnodeNamespaces } from './namespaces';
import { TTL_DEFAULT } from '../../constants';
import { DURATION, TTL_DEFAULT } from '../../constants';
import { UserUtils } from '../../utils';
import { sleepFor } from '../../utils/Promise';
import {
@ -124,7 +124,7 @@ async function retrieveNextMessages(
);
// let exceptions bubble up
// no retry for this one as this a call we do every few seconds while polling for messages
const timeOutMs = 10 * 1000; // yes this is a long timeout for just messages, but 4s timeouts way too often...
const timeOutMs = 10 * DURATION.SECONDS; // yes this is a long timeout for just messages, but 4s timeouts way too often...
const timeoutPromise = async () => sleepFor(timeOutMs);
const fetchPromise = async () =>
doSnodeBatchRequest(retrieveRequestsParams, targetNode, timeOutMs, associatedWith);
@ -166,7 +166,7 @@ async function retrieveNextMessages(
GetNetworkTime.handleTimestampOffsetFromNetwork('retrieve', bodyFirstResult.t);
// NOTE: We don't want to sort messages here because the ordering depends on the snode and when it received each messages.
// NOTE: We don't want to sort messages here because the ordering depends on the snode and when it received each message.
// The last_hash for that snode has to be the last one we've received from that same snode, otherwise we end up fetching the same messages over and over again.
return results.map((result, index) => ({
code: result.code,

@ -1,7 +1,7 @@
import { isBoolean } from 'lodash';
import { PayloadAction, createSlice } from '@reduxjs/toolkit';
import { SettingsKey } from '../../data/settings-key'; // ok: does not import anything else
import { SettingsKey } from '../../data/settings-key';
const SettingsBoolsKeyTrackedInRedux = [
SettingsKey.someDeviceOutdatedSyncing,

@ -12,7 +12,7 @@ export type SessionSettingCategory =
| 'permissions'
| 'help'
| 'recoveryPhrase'
| 'ClearData';
| 'clearData';
export type PasswordAction = 'set' | 'change' | 'remove' | 'enter';

Loading…
Cancel
Save