This repository was archived by the owner on Nov 15, 2023. It is now read-only.

Document more TODOs as tickets #1418

Merged
11 commits merged on Jan 30, 2019
5 changes: 2 additions & 3 deletions core/basic-authorship/src/basic_authorship.rs
@@ -16,8 +16,8 @@

//! A consensus proposer for "basic" chains which use the primitive inherent-data.

// FIXME: move this into substrate-consensus-common - https://github.com/paritytech/substrate/issues/1021

// FIXME #1021 move this into substrate-consensus-common
//
use std::{sync::Arc, self};

use log::{info, trace};
@@ -197,7 +197,6 @@ impl<Block, C, A> Proposer<Block, C, A> where
let pending_iterator = self.transaction_pool.ready();

for pending in pending_iterator {
// TODO [ToDr] Probably get rid of it, and validate in runtime.
let encoded_size = pending.data.encode().len();
if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE { break }

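The hunk above drops a stale TODO but keeps the proposer's size-capping loop: ready transactions are pulled from the pool and their encoded sizes are accumulated until the block budget is reached. A minimal sketch of that loop, detached from the pool API — `select_transactions`, the plain `Vec<Vec<u8>>` input, and the 4 MiB budget are illustrative assumptions, not the proposer's real interface:

```rust
// Hedged sketch of the size cap shown in the hunk above. The real proposer
// iterates `transaction_pool.ready()` and encodes each pending extrinsic;
// here the transactions are assumed to arrive already encoded.
const MAX_TRANSACTIONS_SIZE: usize = 4 * 1024 * 1024; // assumed byte budget

fn select_transactions(encoded_pending: Vec<Vec<u8>>) -> Vec<Vec<u8>> {
    let mut selected = Vec::new();
    let mut pending_size = 0;
    for tx in encoded_pending {
        let encoded_size = tx.len();
        // Stop as soon as the next transaction would push the block over budget.
        if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE {
            break;
        }
        pending_size += encoded_size;
        selected.push(tx);
    }
    selected
}
```

The removed comment questioned whether this check belongs in the proposer at all or should be enforced by the runtime; the cap itself is unchanged by this PR.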
5 changes: 2 additions & 3 deletions core/client/db/src/lib.rs
@@ -307,7 +307,6 @@ where Block: BlockT<Hash=H256>,
}

fn reset_storage(&mut self, mut top: StorageMap, children: ChildrenStorageMap) -> Result<H256, client::error::Error> {
// TODO: wipe out existing trie.

if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(client::error::ErrorKind::GenesisInvalid.into());
@@ -384,7 +383,7 @@ struct DbGenesisStorage(pub H256);
impl DbGenesisStorage {
pub fn new() -> Self {
let mut root = H256::default();
let mut mdb = MemoryDB::<Blake2Hasher>::default(); // TODO: use new() to make it more correct
let mut mdb = MemoryDB::<Blake2Hasher>::default();
state_machine::TrieDBMut::<Blake2Hasher>::new(&mut mdb, &mut root);
DbGenesisStorage(root)
}
@@ -1024,7 +1023,7 @@ mod tests {

fn prepare_changes(changes: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, MemoryDB<Blake2Hasher>) {
let mut changes_root = H256::default();
let mut changes_trie_update = MemoryDB::<Blake2Hasher>::default(); // TODO: change to new() to make more correct
let mut changes_trie_update = MemoryDB::<Blake2Hasher>::default();
{
let mut trie = TrieDBMut::<Blake2Hasher>::new(
&mut changes_trie_update,
2 changes: 0 additions & 2 deletions core/client/db/src/light.rs
@@ -341,7 +341,6 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
// update block number to hash lookup entries.
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
// TODO: can we recover here?
warn!("Safety failure: reverting finalized block {:?}",
(&retracted.number, &retracted.hash));
}
@@ -438,7 +437,6 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
fn finalize_header(&self, id: BlockId<Block>) -> ClientResult<()> {
if let Some(header) = self.header(id)? {
let mut transaction = DBTransaction::new();
// TODO: ensure best chain contains this block.
let hash = header.hash();
let number = *header.number();
self.note_finalized(&mut transaction, &header, hash.clone())?;
2 changes: 1 addition & 1 deletion core/client/db/src/storage_cache.rs
@@ -45,7 +45,7 @@ pub type SharedCache<B, H> = Arc<Mutex<Cache<B, H>>>;

/// Create new shared cache instance with given max memory usage.
pub fn new_shared_cache<B: Block, H: Hasher>(shared_cache_size: usize) -> SharedCache<B, H> {
let cache_items = shared_cache_size / 100; // Estimated average item size. TODO: more accurate tracking
let cache_items = shared_cache_size / 100; // Guesstimate, potentially inaccurate
Arc::new(Mutex::new(Cache {
storage: LruCache::new(cache_items),
hashes: LruCache::new(cache_items),
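The edited comment above concedes that the cache sizing is a guesstimate: the shared byte budget is divided by an assumed average entry size of 100 bytes to obtain an item capacity for both LRU maps. A tiny sketch of that arithmetic — `ESTIMATED_ITEM_SIZE` and `cache_items_for` are illustrative names, not the crate's API:

```rust
/// Assumed average size of a cached entry, mirroring the hard-coded `/ 100` above.
const ESTIMATED_ITEM_SIZE: usize = 100;

/// Turn a byte budget into an LRU item capacity.
fn cache_items_for(shared_cache_size: usize) -> usize {
    shared_cache_size / ESTIMATED_ITEM_SIZE
}

fn main() {
    // e.g. a 16 MiB budget yields roughly 167_772 slots for each of the
    // `storage` and `hashes` maps — accurate only if entries really average
    // ~100 bytes, which is exactly the caveat the new comment records.
    assert_eq!(cache_items_for(16 * 1024 * 1024), 167_772);
}
```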
1 change: 0 additions & 1 deletion core/client/src/call_executor.rs
@@ -205,7 +205,6 @@ where
native_call: Option<NC>,
) -> Result<NativeOrEncoded<R>, error::Error> where ExecutionManager<EM>: Clone {
let state = self.backend.state_at(*at)?;
//TODO: Find a better way to prevent double block initialization
if method != "Core_initialise_block" && initialised_block.map(|id| id != *at).unwrap_or(true) {
let header = prepare_environment_block()?;
state_machine::execute_using_consensus_failure_handler::<
20 changes: 6 additions & 14 deletions core/client/src/client.rs
@@ -130,7 +130,6 @@ pub trait BlockBody<Block: BlockT> {
}

/// Client info
// TODO: split queue info from chain info and amalgamate into single struct.
#[derive(Debug)]
pub struct ClientInfo<Block: BlockT> {
/// Best block hash.
@@ -325,7 +324,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where

/// Get the RuntimeVersion at a given block.
pub fn runtime_version_at(&self, id: &BlockId<Block>) -> error::Result<RuntimeVersion> {
// TODO: Post Poc-2 return an error if version is missing
self.executor.runtime_version(id)
}

@@ -738,11 +736,9 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
self.apply_finality_with_block_hash(operation, parent_hash, None, last_best, make_notifications)?;
}

// TODO: correct path logic for when to execute this function
// https://github.com/paritytech/substrate/issues/1232
// FIXME #1232: correct path logic for when to execute this function
let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?;

// TODO: non longest-chain rule.
let is_new_best = finalized || match fork_choice {
ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number,
ForkChoiceStrategy::Custom(v) => v,
@@ -880,7 +876,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
// if the block is not a direct ancestor of the current best chain,
// then some other block is the common ancestor.
if route_from_best.common_block().hash != block {
// TODO: reorganize best block to be the best chain containing
// FIXME: #1442 reorganize best block to be the best chain containing
// `block`.
}

@@ -1020,7 +1016,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where

/// Get block status.
pub fn block_status(&self, id: &BlockId<Block>) -> error::Result<BlockStatus> {
// TODO: more efficient implementation
// this can probably be implemented more efficiently
if let BlockId::Hash(ref h) = id {
if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) {
return Ok(BlockStatus::Queued);
@@ -1073,10 +1069,9 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
/// If `maybe_max_block_number` is `Some(max_block_number)`
/// the search is limited to blocks with `number <= max_block_number`;
/// in other words, as if there were no blocks greater than `max_block_number`.
///
/// TODO [snd] possibly implement this on blockchain::Backend and just redirect here
/// TODO: we want to move this implementation to `blockchain::Backend`, see [#1443](https://github.com/paritytech/substrate/issues/1443)
/// Returns `Ok(None)` if `target_hash` is not found in search space.
/// TODO [snd] write down time complexity
/// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option<NumberFor<Block>>)
-> error::Result<Option<Block::Hash>>
{
@@ -1140,7 +1135,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
// waiting until we are <= max_number
if let Some(max_number) = maybe_max_number {
loop {
// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;

@@ -1160,7 +1154,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
return Ok(Some(best_hash));
}

// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;

@@ -1176,8 +1169,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// TODO: only issue this warning when not on a dead fork
// part of https://github.com/paritytech/substrate/issues/1558
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all \
leaves backwards. Number limit = {:?}",
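Because the `best_containing` hunks above only show fragments, here is a hedged, self-contained sketch of the search its doc comment describes: for every leaf, walk that branch's ancestry down to `max_number` and, if the branch passes through `target_hash`, return the best block found on it. `Header`, the `HashMap` backend, and the `u64` hashes are invented stand-ins; the real method also skips leaves that can no longer be finalized (the #1558 warning above) and treats a missing header as an error rather than `None`.

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Header { hash: u64, parent: u64, number: u64 }

fn best_containing(
    target_hash: u64,
    max_number: Option<u64>,
    headers: &HashMap<u64, Header>, // hash -> header, assumed complete
    leaves: &[u64],                 // leaf hashes, best first
) -> Option<u64> {
    let target_number = headers.get(&target_hash)?.number;
    for &leaf in leaves {
        let mut current = headers.get(&leaf)?.clone();
        // Walk down this branch until we are <= max_number (if any).
        if let Some(max) = max_number {
            while current.number > max {
                current = headers.get(&current.parent)?.clone();
            }
        }
        // First block <= max_number on this branch: the candidate answer.
        let best_hash = current.hash;
        // Keep walking towards genesis; hitting `target_hash` means this
        // branch contains the target, so `best_hash` is the result.
        while current.number >= target_number {
            if current.hash == target_hash {
                return Some(best_hash);
            }
            if current.number == 0 {
                break; // reached genesis without meeting the target
            }
            current = headers.get(&current.parent)?.clone();
        }
    }
    None
}
```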
2 changes: 1 addition & 1 deletion core/client/src/light/blockchain.rs
@@ -141,7 +141,7 @@ impl<S, F, Block> BlockchainHeaderBackend<Block> for Blockchain<S, F> where Bloc

impl<S, F, Block> BlockchainBackend<Block> for Blockchain<S, F> where Block: BlockT, S: Storage<Block>, F: Fetcher<Block> {
fn body(&self, _id: BlockId<Block>) -> ClientResult<Option<Vec<Block::Extrinsic>>> {
// TODO [light]: fetch from remote node
// TODO: #1445 fetch from remote node
Ok(None)
}

7 changes: 3 additions & 4 deletions core/consensus/aura/src/lib.rs
@@ -392,7 +392,7 @@ enum CheckedHeader<H> {
/// Check if a header has been signed by the right key. If the slot is too far in the future, an error will be returned.
/// If it's successful, returns the pre-header, the slot number, and the signature.
//
// FIXME: needs misbehavior types - https://github.com/paritytech/substrate/issues/1018
// FIXME #1018 needs misbehavior types
fn check_header<B: Block>(slot_now: u64, mut header: B::Header, hash: B::Hash, authorities: &[Ed25519AuthorityId])
-> Result<CheckedHeader<B::Header>, String>
where DigestItemFor<B>: CompatibleDigestItem
@@ -537,8 +537,7 @@ impl<B: Block, C, E> Verifier<B> for AuraVerifier<C, E> where
);

// we add one to allow for some small drift.
// FIXME: in the future, alter this queue to allow deferring of headers
// https://github.com/paritytech/substrate/issues/1019
// FIXME #1019 in the future, alter this queue to allow deferring of headers
let checked_header = check_header::<B>(slot_now + 1, header, hash, &authorities[..])?;
match checked_header {
CheckedHeader::Checked(pre_header, slot_num, sig) => {
@@ -577,7 +576,7 @@ impl<B: Block, C, E> Verifier<B> for AuraVerifier<C, E> where
fork_choice: ForkChoiceStrategy::LongestChain,
};

// FIXME: extract authorities - https://github.com/paritytech/substrate/issues/1019
// FIXME #1019 extract authorities
Ok((import_block, None))
}
CheckedHeader::Deferred(a, b) => {
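The doc comment fixed above says `check_header` must see a slot that is not too far in the future and a signature from the authority expected for that slot. A hedged sketch of just the slot bookkeeping — `check_slot`, `CheckedSlot`, and the round-robin author rule are assumptions modeled on upstream Aura; the real function additionally decodes the seal digest and verifies the ed25519 signature (and the misbehavior reporting of FIXME #1018 is still missing):

```rust
// Invented types for illustration only.
struct CheckedSlot {
    slot_num: u64,
    expected_author_index: usize,
}

fn check_slot(slot_now: u64, slot_num: u64, num_authorities: usize) -> Result<CheckedSlot, String> {
    if num_authorities == 0 {
        return Err("no authorities".into());
    }
    // Headers claiming a slot beyond `slot_now` are rejected; the caller has
    // already folded the small drift allowance into `slot_now` (the `+ 1` above).
    if slot_num > slot_now {
        return Err(format!("header slot {} is in the future (now {})", slot_num, slot_now));
    }
    // Aura-style round robin: the expected author is `slot % authorities.len()`.
    let expected_author_index = (slot_num % num_authorities as u64) as usize;
    Ok(CheckedSlot { slot_num, expected_author_index })
}
```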
4 changes: 2 additions & 2 deletions core/consensus/common/src/import_queue.rs
@@ -401,7 +401,7 @@ pub fn import_single_block<B: BlockT, V: Verifier<B>>(
} else {
debug!(target: "sync", "Header {} was not provided ", block.hash);
}
return Err(BlockImportError::IncompleteHeader(peer)) //TODO: use persistent ID
return Err(BlockImportError::IncompleteHeader(peer))
},
};

@@ -441,7 +441,7 @@ pub fn import_single_block<B: BlockT, V: Verifier<B>>(
},
Ok(ImportResult::KnownBad) => {
debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash);
Err(BlockImportError::BadBlock(peer)) //TODO: use persistent ID
Err(BlockImportError::BadBlock(peer))
},
Err(e) => {
debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e);
9 changes: 3 additions & 6 deletions core/consensus/rhd/src/lib.rs
@@ -31,7 +31,7 @@
//! set for this block height.

#![cfg(feature="rhd")]
// FIXME: doesn't compile - https://github.com/paritytech/substrate/issues/1020
// FIXME #1020 doesn't compile

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -432,7 +432,6 @@ impl<B, P, I, InStream, OutSink> Drop for BftFuture<B, P, I, InStream, OutSink>
OutSink: Sink<SinkItem=Communication<B>, SinkError=Error>,
{
fn drop(&mut self) {
// TODO: have a trait member to pass misbehavior reports into.
let misbehavior = self.inner.drain_misbehavior().collect::<Vec<_>>();
self.inner.context().proposer.import_misbehavior(misbehavior);
}
@@ -466,7 +465,7 @@ pub struct BftService<B: Block, P, I> {
live_agreement: Mutex<Option<(B::Header, AgreementHandle)>>,
round_cache: Arc<Mutex<RoundCache<B::Hash>>>,
round_timeout_multiplier: u64,
key: Arc<ed25519::Pair>, // TODO: key changing over time.
key: Arc<ed25519::Pair>,
factory: P,
}

@@ -488,14 +487,13 @@ impl<B, P, I> BftService<B, P, I>
start_round: 0,
})),
round_timeout_multiplier: 10,
key: key, // TODO: key changing over time.
key: key,
factory,
}
}

/// Get the local Authority ID.
pub fn local_id(&self) -> AuthorityId {
// TODO: based on a header and some keystore.
self.key.public().into()
}

@@ -1084,7 +1082,6 @@ impl<C, A> BaseProposer<<C as AuthoringApi>::Block> for Proposer<C, A> where
self.transaction_pool.ready(|pending_iterator| {
let mut pending_size = 0;
for pending in pending_iterator {
// TODO [ToDr] Probably get rid of it, and validate in runtime.
let encoded_size = pending.data.encode().len();
if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE { break }

1 change: 0 additions & 1 deletion core/executor/src/native_executor.rs
@@ -268,7 +268,6 @@ macro_rules! native_executor_instance {
native_executor_instance!(IMPL $name, $dispatcher, $version, $code);
};
(IMPL $name:ident, $dispatcher:path, $version:path, $code:expr) => {
// TODO: this is not so great – I think I should go back to have dispatch take a type param and modify this macro to accept a type param and then pass it in from the test-client instead
use primitives::Blake2Hasher as _Blake2Hasher;
impl $crate::NativeExecutionDispatch for $name {
fn native_equivalent() -> &'static [u8] {
1 change: 0 additions & 1 deletion core/executor/src/wasm_executor.rs
@@ -118,7 +118,6 @@ impl ReadPrimitive<u32> for MemoryInstance {
}
}

// TODO: this macro does not support `where` clauses and that seems somewhat tricky to add
impl_function_executor!(this: FunctionExecutor<'e, E>,
ext_print_utf8(utf8_data: *const u8, utf8_len: u32) => {
if let Ok(utf8) = this.memory.get(utf8_data, utf8_len as usize) {
3 changes: 1 addition & 2 deletions core/finality-grandpa/src/lib.rs
@@ -577,7 +577,6 @@ impl<B, E, Block: BlockT<Hash=H256>, N, RA> voter::Environment<Block::Hash, Numb
let prevote_timer = Delay::new(now + self.config.gossip_duration * 2);
let precommit_timer = Delay::new(now + self.config.gossip_duration * 4);

// TODO: dispatch this with `mpsc::spawn`.
let incoming = ::communication::checked_message_stream::<Block, _>(
round,
self.set_id,
@@ -857,7 +856,6 @@ fn finalize_block<B, Block: BlockT<Hash=H256>, E, RA>(
// lock must be held through writing to DB to avoid race
let mut authority_set = authority_set.inner().write();

// TODO [andre]: clone only when changed (#1483)
// FIXME #1483: clone only when changed
let old_authority_set = authority_set.clone();
// needed in case there is an authority set change, used for reverting in
// case of error
2 changes: 1 addition & 1 deletion core/keystore/src/lib.rs
@@ -58,7 +58,7 @@ pub struct InvalidPassword;
struct EncryptedKey {
mac: [u8; 32],
salt: [u8; 32],
ciphertext: Vec<u8>, // TODO: switch to fixed-size when serde supports
ciphertext: Vec<u8>, // FIXME: switch to fixed-size when serde supports
Contributor Author: @rphmeier not quite sure I understand the problem here. Would you know the serde ticket this waits for?

Member: I assume that it is the following problem: serde-rs/serde#573

Member: So, it could be put into an Easy ticket.

Contributor: yep, the ciphertext for an encrypted ed25519 key is always the same size, because ed25519 keys are 32 bytes

iv: [u8; 16],
iterations: u32,
}
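On the review thread above: at the time, serde's derived `Serialize`/`Deserialize` only covered arrays up to 32 elements (serde-rs/serde#573), so a fixed-size ciphertext field would have needed a hand-written impl like the hedged sketch below. `Ciphertext` and the 64-byte length are assumptions for illustration, not the keystore's actual format; a complete impl would also want `visit_seq` for formats such as JSON that encode bytes as sequences.

```rust
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

/// Hypothetical fixed-size ciphertext wrapper (length assumed for the sketch).
pub struct Ciphertext(pub [u8; 64]);

impl Serialize for Ciphertext {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Serialize as a plain byte string.
        serializer.serialize_bytes(&self.0)
    }
}

impl<'de> Deserialize<'de> for Ciphertext {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        struct BytesVisitor;

        impl<'de> de::Visitor<'de> for BytesVisitor {
            type Value = Ciphertext;

            fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(f, "exactly 64 bytes of ciphertext")
            }

            fn visit_bytes<E: de::Error>(self, v: &[u8]) -> Result<Self::Value, E> {
                if v.len() != 64 {
                    return Err(E::invalid_length(v.len(), &self));
                }
                let mut out = [0u8; 64];
                out.copy_from_slice(v);
                Ok(Ciphertext(out))
            }
        }

        deserializer.deserialize_bytes(BytesVisitor)
    }
}
```

Keeping `ciphertext: Vec<u8>` avoids all of this, which is presumably why the field stays a `Vec` and the comment merely gains a FIXME.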
1 change: 0 additions & 1 deletion core/network/src/consensus_gossip.rs
@@ -76,7 +76,6 @@ impl<B: BlockT> ConsensusGossip<B> {
if roles.intersects(Roles::AUTHORITY) {
trace!(target:"gossip", "Registering {:?} {}", roles, who);
// Send out all known messages to authorities.
// TODO: limit by size
let mut known_messages = HashSet::new();
for entry in self.messages.iter() {
known_messages.insert((entry.topic, entry.message_hash));
2 changes: 1 addition & 1 deletion core/network/src/on_demand.rs
@@ -213,7 +213,7 @@ impl<B, E> OnDemandService<B> for OnDemand<B, E> where
B::Header: HeaderT,
{
fn on_connect(&self, peer: NodeIndex, role: Roles, best_number: NumberFor<B>) {
if !role.intersects(Roles::FULL | Roles::AUTHORITY) { // TODO: correct?
if !role.intersects(Roles::FULL | Roles::AUTHORITY) {
return;
}

2 changes: 0 additions & 2 deletions core/network/src/protocol.rs
@@ -458,7 +458,6 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
message::FromBlock::Number(n) => BlockId::Number(n),
};
let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize;
// TODO: receipts, etc.
let get_header = request.fields.contains(message::BlockAttributes::HEADER);
let get_body = request.fields.contains(message::BlockAttributes::BODY);
let get_justification = request.fields.contains(message::BlockAttributes::JUSTIFICATION);
@@ -498,7 +497,6 @@ impl<B: BlockT, S: NetworkSpecialization<B>, H: ExHashT> Protocol<B, S, H> {
}

fn on_block_response(&self, io: &mut SyncIo, peer: NodeIndex, request: message::BlockRequest<B>, response: message::BlockResponse<B>) {
// TODO: validate response
let blocks_range = match (
response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())),
response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())),
2 changes: 1 addition & 1 deletion core/primitives/src/changes_trie.rs
@@ -49,7 +49,7 @@ impl ChangesTrieConfiguration {
return 1;
}

// TODO: use saturating_pow when available
// FIXME: use saturating_pow once stabilized - https://github.com/rust-lang/rust/issues/48320
let mut max_digest_interval = self.digest_interval;
for _ in 1..self.digest_levels {
max_digest_interval = match max_digest_interval.checked_mul(self.digest_interval) {
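For context on the new FIXME: the surrounding code computes `digest_interval` raised to `digest_levels`, saturating at `u32::max_value()` on overflow, by looping over `checked_mul`. A hedged sketch of that loop next to the `saturating_pow` it is waiting for (later stabilized, around Rust 1.34); it assumes the earlier guard has already returned 1 for `digest_levels == 0`:

```rust
// The hand-rolled version mirroring the hunk above.
fn max_digest_interval_loop(digest_interval: u32, digest_levels: u32) -> u32 {
    let mut max_digest_interval = digest_interval;
    for _ in 1..digest_levels {
        max_digest_interval = match max_digest_interval.checked_mul(digest_interval) {
            Some(new) => new,
            None => return u32::max_value(), // saturate on overflow
        };
    }
    max_digest_interval
}

// What the FIXME wants to use instead, once available on stable.
fn max_digest_interval_pow(digest_interval: u32, digest_levels: u32) -> u32 {
    digest_interval.saturating_pow(digest_levels)
}
```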
16 changes: 0 additions & 16 deletions core/primitives/src/ed25519.rs
@@ -238,16 +238,6 @@ impl Pair {
r.copy_from_slice(pk);
Public(r)
}

/// Derive a child key. Probably unsafe and broken.
// TODO: proper HD derivation https://cardanolaunch.com/assets/Ed25519_BIP.pdf
pub fn derive_child_probably_bad(&self, chain_data: &[u8]) -> Pair {
let sig = self.sign(chain_data);
let mut seed = [0u8; 32];
seed.copy_from_slice(&sig[..32]);

Pair::from_seed(&seed)
}
}

/// Verify a signature on a message. Returns true if the signature is good.
@@ -350,12 +340,6 @@ mod test {
assert_eq!(pair1.public(), pair2.public());
}

#[test]
fn derive_child() {
let pair = Pair::generate();
let _pair2 = pair.derive_child_probably_bad(b"session_1234");
}

#[test]
fn ss58check_roundtrip_works() {
let pair = Pair::from_seed(b"12345678901234567890123456789012");
3 changes: 2 additions & 1 deletion core/rpc/src/chain/number.rs
@@ -41,7 +41,8 @@ impl<Number: traits::As<u64>> NumberOrHex<Number> {
match self {
NumberOrHex::Number(n) => Ok(n),
NumberOrHex::Hex(h) => {
// TODO [ToDr] this only supports `u64` since `BlockNumber` is `As<u64>` we could possibly go with `u128`. (#1377)
// FIXME #1377 this only supports `u64` since `BlockNumber`
// is `As<u64>` we could possibly go with `u128`.
let l = h.low_u64();
if U256::from(l) != h {
Err(format!("`{}` does not fit into the block number type.", h))