diff --git a/Cargo.lock b/Cargo.lock index ffed3cc3..f84ef2a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -738,21 +738,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "kvproto" -version = "0.0.2" -source = "git+https://github.com/pingcap/kvproto.git#c0e3a4d8bbece2b8d796ed5a08649ea5223b7abf" -dependencies = [ - "futures 0.3.5", - "grpcio", - "lazy_static", - "prost", - "prost-derive", - "protobuf", - "protobuf-build", - "raft-proto", -] - [[package]] name = "lazy_static" version = "1.4.0" @@ -898,9 +883,9 @@ dependencies = [ "derive-new", "futures 0.3.5", "grpcio", - "kvproto", "log", "tikv-client-common", + "tikv-client-proto", ] [[package]] @@ -1299,17 +1284,6 @@ dependencies = [ "proc-macro2 1.0.19", ] -[[package]] -name = "raft-proto" -version = "0.6.0-alpha" -source = "git+https://github.com/tikv/raft-rs?rev=e624c1d48460940a40d8aa69b5329460d9af87dd#e624c1d48460940a40d8aa69b5329460d9af87dd" -dependencies = [ - "lazy_static", - "prost", - "protobuf", - "protobuf-build", -] - [[package]] name = "rand" version = "0.4.6" @@ -1855,7 +1829,6 @@ dependencies = [ "futures 0.3.5", "futures-timer", "grpcio", - "kvproto", "lazy_static", "log", "mock-tikv", @@ -1871,6 +1844,7 @@ dependencies = [ "tempdir", "tikv-client-common", "tikv-client-pd", + "tikv-client-proto", "tikv-client-store", "tokio", ] @@ -1885,7 +1859,6 @@ dependencies = [ "failure", "futures 0.3.5", "grpcio", - "kvproto", "lazy_static", "log", "proptest", @@ -1894,6 +1867,7 @@ dependencies = [ "serde", "serde_derive", "tempdir", + "tikv-client-proto", "tokio", ] @@ -1907,15 +1881,28 @@ dependencies = [ "fail", "futures 0.3.5", "grpcio", - "kvproto", "log", "proptest", "proptest-derive", "tempdir", "tikv-client-common", + "tikv-client-proto", "tokio", ] +[[package]] +name = "tikv-client-proto" +version = "0.0.0" +dependencies = [ + "futures 0.3.5", + "grpcio", + "lazy_static", + "prost", + "prost-derive", + "protobuf", + "protobuf-build", +] + [[package]] name = "tikv-client-store" version = "0.0.0" @@ -1924,9 +1911,9 @@ dependencies = [ "derive-new", "futures 0.3.5", "grpcio", - "kvproto", "log", "tikv-client-common", + "tikv-client-proto", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 21bf096b..35c4dd5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,45 +21,40 @@ name = "tikv_client" derive-new = "0.5" failure = "0.1" futures = { version = "0.3.5", features = ["async-await", "thread-pool"] } +futures-timer = "3.0" grpcio = { version = "0.6", features = [ "secure", "prost-codec" ], default-features = false } -kvproto = { git = "https://github.com/pingcap/kvproto.git", features = [ "prost-codec" ], default-features = false } lazy_static = "1" -log = "0.4" +log = "0.4" +prometheus = { version = "0.8", features = [ "push", "process" ], default-features = false } rand = "0.7" regex = "1" serde = "1.0" serde_derive = "1.0" -futures-timer = "3.0" async-trait = "0.1" tokio = { version = "0.2", features = ["sync"] } tikv-client-common = { path = "tikv-client-common" } tikv-client-pd = { path = "tikv-client-pd" } +tikv-client-proto = { path = "tikv-client-proto" } tikv-client-store = { path = "tikv-client-store" } mock-tikv = {path = "mock-tikv"} -[dependencies.prometheus] -version = "0.8" -default-features = false -features = ["push", "process"] [dev-dependencies] clap = "2.32" -tempdir = "0.3" -tokio = { version = "0.2", features = ["rt-threaded", "macros"] } +fail = { version = "0.3", features = [ "failpoints" ] } proptest = "0.9" proptest-derive = "0.1.0" -fail = { version = "0.3", features = [ "failpoints" ] } 
-simple_logger = "1.9.0" +tempdir = "0.3" serial_test = "0.5.0" - -[patch.crates-io] -raft-proto = { git = "https://github.com/tikv/raft-rs", rev = "e624c1d48460940a40d8aa69b5329460d9af87dd" } +simple_logger = "1.9.0" +tokio = { version = "0.2", features = ["rt-threaded", "macros"] } [workspace] members = [ "tikv-client-common", "tikv-client-pd", + "tikv-client-proto", "tikv-client-store", "mock-tikv" ] diff --git a/mock-tikv/Cargo.toml b/mock-tikv/Cargo.toml index 72cbe4f7..39dfcdce 100644 --- a/mock-tikv/Cargo.toml +++ b/mock-tikv/Cargo.toml @@ -4,9 +4,9 @@ version = "0.0.0" edition = "2018" [dependencies] +derive-new = "0.5.8" futures = "0.3" grpcio = { version = "0.6", features = [ "secure", "prost-codec" ], default-features = false } -kvproto = { git = "https://github.com/pingcap/kvproto.git", features = [ "prost-codec" ], default-features = false } -derive-new = "0.5.8" -tikv-client-common = { path = "../tikv-client-common"} log = "0.4" +tikv-client-common = { path = "../tikv-client-common"} +tikv-client-proto = { path = "../tikv-client-proto"} diff --git a/mock-tikv/src/pd.rs b/mock-tikv/src/pd.rs index 82dbccec..a13cd44c 100644 --- a/mock-tikv/src/pd.rs +++ b/mock-tikv/src/pd.rs @@ -3,8 +3,8 @@ use crate::{spawn_unary_success, MOCK_TIKV_PORT}; use futures::{FutureExt, StreamExt, TryFutureExt}; use grpcio::{Environment, Server, ServerBuilder, WriteFlags}; -use kvproto::pdpb::*; use std::sync::Arc; +use tikv_client_proto::pdpb::*; pub const MOCK_PD_PORT: u16 = 50021; /// This is mock pd server, used with mock tikv server. @@ -18,8 +18,8 @@ impl MockPd { MockPd { ts: 0 } } - fn region() -> kvproto::metapb::Region { - kvproto::metapb::Region { + fn region() -> tikv_client_proto::metapb::Region { + tikv_client_proto::metapb::Region { start_key: vec![], end_key: vec![], peers: vec![Self::leader()], @@ -27,13 +27,13 @@ impl MockPd { } } - fn leader() -> kvproto::metapb::Peer { - kvproto::metapb::Peer::default() + fn leader() -> tikv_client_proto::metapb::Peer { + tikv_client_proto::metapb::Peer::default() } - fn store() -> kvproto::metapb::Store { + fn store() -> tikv_client_proto::metapb::Store { // TODO: start_timestamp? 
- kvproto::metapb::Store { + tikv_client_proto::metapb::Store { address: format!("localhost:{}", MOCK_TIKV_PORT), ..Default::default() } @@ -317,4 +317,13 @@ impl Pd for MockPd { ) { todo!() } + + fn sync_max_ts( + &mut self, + _ctx: ::grpcio::RpcContext, + _req: SyncMaxTsRequest, + _sink: ::grpcio::UnarySink, + ) { + todo!() + } } diff --git a/mock-tikv/src/server.rs b/mock-tikv/src/server.rs index 275b6d7e..caf0a6fc 100644 --- a/mock-tikv/src/server.rs +++ b/mock-tikv/src/server.rs @@ -4,8 +4,8 @@ use crate::{spawn_unary_success, KvStore}; use derive_new::new; use futures::{FutureExt, TryFutureExt}; use grpcio::{Environment, Server, ServerBuilder}; -use kvproto::{kvrpcpb::*, tikvpb::*}; use std::sync::Arc; +use tikv_client_proto::{kvrpcpb::*, tikvpb::*}; pub const MOCK_TIKV_PORT: u16 = 50019; @@ -29,8 +29,8 @@ impl Tikv for MockTikv { fn kv_get( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::GetRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::GetRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -38,8 +38,8 @@ impl Tikv for MockTikv { fn kv_scan( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::ScanRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::ScanRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -47,8 +47,8 @@ impl Tikv for MockTikv { fn kv_prewrite( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::PrewriteRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::PrewriteRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -56,8 +56,8 @@ impl Tikv for MockTikv { fn kv_pessimistic_lock( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::PessimisticLockRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::PessimisticLockRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -65,8 +65,8 @@ impl Tikv for MockTikv { fn kv_pessimistic_rollback( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::PessimisticRollbackRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::PessimisticRollbackRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -74,8 +74,8 @@ impl Tikv for MockTikv { fn kv_txn_heart_beat( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::TxnHeartBeatRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::TxnHeartBeatRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -83,8 +83,8 @@ impl Tikv for MockTikv { fn kv_check_txn_status( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::CheckTxnStatusRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::CheckTxnStatusRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -92,8 +92,8 @@ impl Tikv for MockTikv { fn kv_commit( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::CommitRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::CommitRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -101,8 +101,8 @@ impl Tikv for MockTikv { fn kv_import( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::ImportRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::ImportRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -110,8 +110,8 @@ impl Tikv for MockTikv { fn kv_cleanup( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::CleanupRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::CleanupRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -119,8 +119,8 @@ impl Tikv for MockTikv { fn kv_batch_get( &mut self, 
_ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::BatchGetRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::BatchGetRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -128,8 +128,8 @@ impl Tikv for MockTikv { fn kv_batch_rollback( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::BatchRollbackRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::BatchRollbackRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -137,8 +137,8 @@ impl Tikv for MockTikv { fn kv_scan_lock( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::ScanLockRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::ScanLockRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -146,8 +146,8 @@ impl Tikv for MockTikv { fn kv_resolve_lock( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::ResolveLockRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::ResolveLockRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -155,8 +155,8 @@ impl Tikv for MockTikv { fn kv_gc( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::GcRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::GcRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -164,8 +164,8 @@ impl Tikv for MockTikv { fn kv_delete_range( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::DeleteRangeRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::DeleteRangeRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -173,8 +173,8 @@ impl Tikv for MockTikv { fn raw_get( &mut self, ctx: grpcio::RpcContext, - req: kvproto::kvrpcpb::RawGetRequest, - sink: grpcio::UnarySink, + req: tikv_client_proto::kvrpcpb::RawGetRequest, + sink: grpcio::UnarySink, ) { let mut resp = RawGetResponse::default(); if let Some(v) = self.inner.raw_get(req.get_key()) { @@ -188,10 +188,10 @@ impl Tikv for MockTikv { fn raw_batch_get( &mut self, ctx: grpcio::RpcContext, - mut req: kvproto::kvrpcpb::RawBatchGetRequest, - sink: grpcio::UnarySink, + mut req: tikv_client_proto::kvrpcpb::RawBatchGetRequest, + sink: grpcio::UnarySink, ) { - let mut resp = kvproto::kvrpcpb::RawBatchGetResponse::default(); + let mut resp = tikv_client_proto::kvrpcpb::RawBatchGetResponse::default(); resp.set_pairs(self.inner.raw_batch_get(req.take_keys())); spawn_unary_success!(ctx, req, resp, sink); } @@ -199,8 +199,8 @@ impl Tikv for MockTikv { fn raw_put( &mut self, ctx: grpcio::RpcContext, - req: kvproto::kvrpcpb::RawPutRequest, - sink: grpcio::UnarySink, + req: tikv_client_proto::kvrpcpb::RawPutRequest, + sink: grpcio::UnarySink, ) { self.inner .raw_put(req.get_key().to_vec(), req.get_value().to_vec()); @@ -211,8 +211,8 @@ impl Tikv for MockTikv { fn raw_batch_put( &mut self, ctx: grpcio::RpcContext, - mut req: kvproto::kvrpcpb::RawBatchPutRequest, - sink: grpcio::UnarySink, + mut req: tikv_client_proto::kvrpcpb::RawBatchPutRequest, + sink: grpcio::UnarySink, ) { let pairs = req.take_pairs(); self.inner.raw_batch_put(pairs); @@ -223,8 +223,8 @@ impl Tikv for MockTikv { fn raw_delete( &mut self, ctx: grpcio::RpcContext, - req: kvproto::kvrpcpb::RawDeleteRequest, - sink: grpcio::UnarySink, + req: tikv_client_proto::kvrpcpb::RawDeleteRequest, + sink: grpcio::UnarySink, ) { let key = req.get_key(); self.inner.raw_delete(key); @@ -235,8 +235,8 @@ impl Tikv for MockTikv { fn raw_batch_delete( &mut self, ctx: grpcio::RpcContext, - mut req: kvproto::kvrpcpb::RawBatchDeleteRequest, - sink: grpcio::UnarySink, + mut req: 
tikv_client_proto::kvrpcpb::RawBatchDeleteRequest, + sink: grpcio::UnarySink, ) { let keys = req.take_keys(); self.inner.raw_batch_delete(keys); @@ -247,8 +247,8 @@ impl Tikv for MockTikv { fn raw_scan( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::RawScanRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::RawScanRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -256,8 +256,8 @@ impl Tikv for MockTikv { fn raw_delete_range( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::RawDeleteRangeRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::RawDeleteRangeRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -265,8 +265,8 @@ impl Tikv for MockTikv { fn raw_batch_scan( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::RawBatchScanRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::RawBatchScanRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -274,8 +274,8 @@ impl Tikv for MockTikv { fn ver_get( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerGetRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerGetRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -283,8 +283,8 @@ impl Tikv for MockTikv { fn ver_batch_get( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerBatchGetRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerBatchGetRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -292,8 +292,8 @@ impl Tikv for MockTikv { fn ver_mut( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerMutRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerMutRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -301,8 +301,8 @@ impl Tikv for MockTikv { fn ver_batch_mut( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerBatchMutRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerBatchMutRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -310,8 +310,8 @@ impl Tikv for MockTikv { fn ver_scan( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerScanRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerScanRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -319,8 +319,8 @@ impl Tikv for MockTikv { fn ver_delete_range( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::VerDeleteRangeRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::VerDeleteRangeRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -328,8 +328,8 @@ impl Tikv for MockTikv { fn unsafe_destroy_range( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::UnsafeDestroyRangeRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::UnsafeDestroyRangeRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -337,8 +337,8 @@ impl Tikv for MockTikv { fn register_lock_observer( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::RegisterLockObserverRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::RegisterLockObserverRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -346,8 +346,8 @@ impl Tikv for MockTikv { fn check_lock_observer( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::CheckLockObserverRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::CheckLockObserverRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -355,8 +355,8 @@ impl Tikv for MockTikv { fn remove_lock_observer( &mut self, _ctx: grpcio::RpcContext, - _req: 
kvproto::kvrpcpb::RemoveLockObserverRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::RemoveLockObserverRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -364,8 +364,8 @@ impl Tikv for MockTikv { fn physical_scan_lock( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::PhysicalScanLockRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::PhysicalScanLockRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -373,8 +373,8 @@ impl Tikv for MockTikv { fn coprocessor( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::coprocessor::Request, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::coprocessor::Request, + _sink: grpcio::UnarySink, ) { todo!() } @@ -382,8 +382,8 @@ impl Tikv for MockTikv { fn coprocessor_stream( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::coprocessor::Request, - _sink: grpcio::ServerStreamingSink, + _req: tikv_client_proto::coprocessor::Request, + _sink: grpcio::ServerStreamingSink, ) { todo!() } @@ -391,8 +391,8 @@ impl Tikv for MockTikv { fn batch_coprocessor( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::coprocessor::BatchRequest, - _sink: grpcio::ServerStreamingSink, + _req: tikv_client_proto::coprocessor::BatchRequest, + _sink: grpcio::ServerStreamingSink, ) { todo!() } @@ -400,8 +400,8 @@ impl Tikv for MockTikv { fn raft( &mut self, _ctx: grpcio::RpcContext, - _stream: grpcio::RequestStream, - _sink: grpcio::ClientStreamingSink, + _stream: grpcio::RequestStream, + _sink: grpcio::ClientStreamingSink, ) { todo!() } @@ -409,8 +409,8 @@ impl Tikv for MockTikv { fn batch_raft( &mut self, _ctx: grpcio::RpcContext, - _stream: grpcio::RequestStream, - _sink: grpcio::ClientStreamingSink, + _stream: grpcio::RequestStream, + _sink: grpcio::ClientStreamingSink, ) { todo!() } @@ -418,8 +418,8 @@ impl Tikv for MockTikv { fn snapshot( &mut self, _ctx: grpcio::RpcContext, - _stream: grpcio::RequestStream, - _sink: grpcio::ClientStreamingSink, + _stream: grpcio::RequestStream, + _sink: grpcio::ClientStreamingSink, ) { todo!() } @@ -427,8 +427,8 @@ impl Tikv for MockTikv { fn split_region( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::SplitRegionRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::SplitRegionRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -436,8 +436,8 @@ impl Tikv for MockTikv { fn read_index( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::ReadIndexRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::ReadIndexRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -445,8 +445,8 @@ impl Tikv for MockTikv { fn mvcc_get_by_key( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::MvccGetByKeyRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::MvccGetByKeyRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -454,8 +454,8 @@ impl Tikv for MockTikv { fn mvcc_get_by_start_ts( &mut self, _ctx: grpcio::RpcContext, - _req: kvproto::kvrpcpb::MvccGetByStartTsRequest, - _sink: grpcio::UnarySink, + _req: tikv_client_proto::kvrpcpb::MvccGetByStartTsRequest, + _sink: grpcio::UnarySink, ) { todo!() } @@ -463,8 +463,8 @@ impl Tikv for MockTikv { fn batch_commands( &mut self, _ctx: grpcio::RpcContext, - _stream: grpcio::RequestStream, - _sink: grpcio::DuplexSink, + _stream: grpcio::RequestStream, + _sink: grpcio::DuplexSink, ) { todo!() } @@ -472,8 +472,35 @@ impl Tikv for MockTikv { fn kv_check_secondary_locks( &mut self, _: grpcio::RpcContext<'_>, - _: 
kvproto::kvrpcpb::CheckSecondaryLocksRequest, - _: grpcio::UnarySink, + _: tikv_client_proto::kvrpcpb::CheckSecondaryLocksRequest, + _: grpcio::UnarySink, + ) { + todo!() + } + + fn dispatch_mpp_task( + &mut self, + _: grpcio::RpcContext<'_>, + _: tikv_client_proto::mpp::DispatchTaskRequest, + _: grpcio::UnarySink, + ) { + todo!() + } + + fn cancel_mpp_task( + &mut self, + _: grpcio::RpcContext<'_>, + _: tikv_client_proto::mpp::CancelTaskRequest, + _: grpcio::UnarySink, + ) { + todo!() + } + + fn establish_mpp_connection( + &mut self, + _: grpcio::RpcContext<'_>, + _: tikv_client_proto::mpp::EstablishMppConnectionRequest, + _: grpcio::ServerStreamingSink, ) { todo!() } diff --git a/mock-tikv/src/store.rs b/mock-tikv/src/store.rs index a8ed8943..a69c3afc 100644 --- a/mock-tikv/src/store.rs +++ b/mock-tikv/src/store.rs @@ -1,10 +1,10 @@ // Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. -use kvproto::kvrpcpb::KvPair; use std::{ collections::HashMap, sync::{Arc, RwLock}, }; +use tikv_client_proto::kvrpcpb::KvPair; #[derive(Debug, Clone)] pub struct KvStore { diff --git a/src/mock.rs b/src/mock.rs index c483c924..834698d0 100644 --- a/src/mock.rs +++ b/src/mock.rs @@ -11,8 +11,8 @@ use crate::{ }; use async_trait::async_trait; use derive_new::new; -use kvproto::metapb; use std::{any::Any, sync::Arc}; +use tikv_client_proto::metapb; use tikv_client_store::{KvClient, KvConnect, Region, RegionId, Request, Store}; /// Create a `PdRpcClient` with it's internals replaced with mocks so that the diff --git a/src/pd/retry.rs b/src/pd/retry.rs index ece6ae0c..c17a1676 100644 --- a/src/pd/retry.rs +++ b/src/pd/retry.rs @@ -6,16 +6,16 @@ use crate::{stats::pd_stats, Error, Region, RegionId, Result, SecurityManager, S use async_trait::async_trait; use futures_timer::Delay; use grpcio::Environment; -use kvproto::{ - metapb, - pdpb::{self, Timestamp}, -}; use std::{ fmt, sync::Arc, time::{Duration, Instant}, }; use tikv_client_pd::{Cluster, Connection}; +use tikv_client_proto::{ + metapb, + pdpb::{self, Timestamp}, +}; use tokio::sync::RwLock; // FIXME: these numbers and how they are used are all just cargo-culted in, there diff --git a/src/raw/requests.rs b/src/raw/requests.rs index 099e3a11..694d0031 100644 --- a/src/raw/requests.rs +++ b/src/raw/requests.rs @@ -12,8 +12,8 @@ use crate::{ }; use async_trait::async_trait; use futures::{prelude::*, stream::BoxStream}; -use kvproto::kvrpcpb; use std::{mem, sync::Arc}; +use tikv_client_proto::kvrpcpb; use tikv_client_store::Store; #[async_trait] @@ -470,8 +470,8 @@ mod test { request::OPTIMISTIC_BACKOFF, }; use futures::executor; - use kvproto::kvrpcpb; use std::any::Any; + use tikv_client_proto::kvrpcpb; #[test] #[ignore] diff --git a/src/request.rs b/src/request.rs index 334f0a90..b31a74cf 100644 --- a/src/request.rs +++ b/src/request.rs @@ -264,8 +264,8 @@ mod test { use crate::mock::{MockKvClient, MockPdClient}; use futures::executor; use grpcio::CallOption; - use kvproto::{kvrpcpb, tikvpb::TikvClient}; use std::{any::Any, sync::Mutex}; + use tikv_client_proto::{kvrpcpb, tikvpb::TikvClient}; #[test] fn test_region_retry() { diff --git a/src/transaction/buffer.rs b/src/transaction/buffer.rs index 4666c4a0..f03a6eb4 100644 --- a/src/transaction/buffer.rs +++ b/src/transaction/buffer.rs @@ -1,12 +1,12 @@ // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. 
use crate::{BoundRange, Key, KvPair, Result, Value}; -use kvproto::kvrpcpb; use std::{ collections::{BTreeMap, HashMap}, future::Future, sync::Mutex, }; +use tikv_client_proto::kvrpcpb; /// A caching layer which buffers reads and writes in a transaction. #[derive(Default)] diff --git a/src/transaction/client.rs b/src/transaction/client.rs index 6a013902..f9033035 100644 --- a/src/transaction/client.rs +++ b/src/transaction/client.rs @@ -9,9 +9,9 @@ use crate::{ Result, }; use futures::executor::ThreadPool; -use kvproto::{kvrpcpb, pdpb::Timestamp}; use std::{mem, sync::Arc}; use tikv_client_common::TimestampExt; +use tikv_client_proto::{kvrpcpb, pdpb::Timestamp}; const SCAN_LOCK_BATCH_SIZE: u32 = 1024; // TODO: cargo-culted value diff --git a/src/transaction/lock.rs b/src/transaction/lock.rs index 2690992f..8da6b8d0 100644 --- a/src/transaction/lock.rs +++ b/src/transaction/lock.rs @@ -4,12 +4,12 @@ use crate::{ transaction::requests, ErrorKind, Key, RegionVerId, Result, }; -use kvproto::{kvrpcpb, pdpb::Timestamp}; use std::{ collections::{HashMap, HashSet}, sync::Arc, }; use tikv_client_common::TimestampExt; +use tikv_client_proto::{kvrpcpb, pdpb::Timestamp}; const RESOLVE_LOCK_RETRY_LIMIT: usize = 10; @@ -125,8 +125,8 @@ mod tests { use super::*; use crate::mock::{MockKvClient, MockPdClient}; use futures::executor; - use kvproto::errorpb; use std::any::Any; + use tikv_client_proto::errorpb; #[test] fn test_resolve_lock_with_retry() { diff --git a/src/transaction/requests.rs b/src/transaction/requests.rs index 8841ac58..18914d3e 100644 --- a/src/transaction/requests.rs +++ b/src/transaction/requests.rs @@ -9,9 +9,9 @@ use crate::{ }; use async_trait::async_trait; use futures::{prelude::*, stream::BoxStream}; -use kvproto::{kvrpcpb, pdpb::Timestamp}; use std::{iter, mem, sync::Arc}; use tikv_client_common::TimestampExt; +use tikv_client_proto::{kvrpcpb, pdpb::Timestamp}; use tikv_client_store::Store; #[async_trait] diff --git a/src/transaction/transaction.rs b/src/transaction/transaction.rs index b2c3ee24..ea6e5364 100644 --- a/src/transaction/transaction.rs +++ b/src/transaction/transaction.rs @@ -8,9 +8,9 @@ use crate::{ }; use derive_new::new; use futures::{executor::ThreadPool, prelude::*, stream::BoxStream}; -use kvproto::{kvrpcpb, pdpb::Timestamp}; use std::{iter, mem, ops::RangeBounds, sync::Arc}; use tikv_client_common::TimestampExt; +use tikv_client_proto::{kvrpcpb, pdpb::Timestamp}; /// A undo-able set of actions on the dataset. /// diff --git a/tikv-client-common/Cargo.toml b/tikv-client-common/Cargo.toml index 2f3f54cc..0ab3dc28 100644 --- a/tikv-client-common/Cargo.toml +++ b/tikv-client-common/Cargo.toml @@ -9,12 +9,12 @@ derive-new = "0.5" failure = "0.1" futures = { version = "0.3.5", features = ["compat", "async-await", "thread-pool"] } grpcio = { version = "0.6", features = [ "secure", "prost-codec" ], default-features = false } -kvproto = { git = "https://github.com/pingcap/kvproto.git", features = [ "prost-codec" ], default-features = false } lazy_static = "1" log = "0.4" regex = "1" serde = "1.0" serde_derive = "1.0" +tikv-client-proto = { path = "../tikv-client-proto" } [dev-dependencies] clap = "2.32" diff --git a/tikv-client-common/src/errors.rs b/tikv-client-common/src/errors.rs index 54b8df23..2a841ddb 100644 --- a/tikv-client-common/src/errors.rs +++ b/tikv-client-common/src/errors.rs @@ -1,11 +1,11 @@ // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0. 
use failure::{Backtrace, Context, Fail}; -use kvproto::errorpb; use std::{ fmt::{self, Display}, result, }; +use tikv_client_proto::errorpb; #[derive(Debug)] pub struct Error { @@ -33,7 +33,7 @@ pub enum ErrorKind { RegionForKeyNotFound { key: Vec<u8> }, /// Errors caused by changed region information #[fail(display = "Region error: {:?}", _0)] - RegionError(kvproto::errorpb::Error), + RegionError(tikv_client_proto::errorpb::Error), /// No region is found for the given id. #[fail(display = "Region {} is not found", region_id)] RegionNotFound { region_id: u64 }, @@ -52,9 +52,9 @@ pub enum ErrorKind { /// Scan limit exceeds the maximum #[fail(display = "Limit {} exceeds max scan limit {}", limit, max_limit)] MaxScanLimitExceeded { limit: u32, max_limit: u32 }, - /// Wraps `kvproto::kvrpcpb::KeyError` + /// Wraps `tikv_client_proto::kvrpcpb::KeyError` #[fail(display = "{:?}", _0)] - KeyError(kvproto::kvrpcpb::KeyError), + KeyError(tikv_client_proto::kvrpcpb::KeyError), /// A string error returned by TiKV server #[fail(display = "Kv error. {}", message)] KvError { message: String }, @@ -104,7 +104,7 @@ impl Error { Error::from(ErrorKind::RegionForKeyNotFound { key }) } - pub fn region_error(error: kvproto::errorpb::Error) -> Self { + pub fn region_error(error: tikv_client_proto::errorpb::Error) -> Self { Error::from(ErrorKind::RegionError(error)) } @@ -183,8 +183,8 @@ impl From for Error { } } -impl From<kvproto::kvrpcpb::KeyError> for Error { - fn from(err: kvproto::kvrpcpb::KeyError) -> Self { +impl From<tikv_client_proto::kvrpcpb::KeyError> for Error { + fn from(err: tikv_client_proto::kvrpcpb::KeyError) -> Self { Error::from(ErrorKind::KeyError(err)) } } diff --git a/tikv-client-common/src/kv/bound_range.rs b/tikv-client-common/src/kv/bound_range.rs index 52e8bc6e..5b6008ef 100644 --- a/tikv-client-common/src/kv/bound_range.rs +++ b/tikv-client-common/src/kv/bound_range.rs @@ -2,7 +2,6 @@ use super::Key; use crate::{Error, Result}; -use kvproto::kvrpcpb; #[cfg(test)] use proptest_derive::Arbitrary; use std::{ @@ -11,6 +10,7 @@ use std::{ convert::TryFrom, ops::{Bound, Range, RangeBounds, RangeFrom, RangeInclusive}, }; +use tikv_client_proto::kvrpcpb; /// A struct for expressing ranges. This type is semi-opaque and is not really meant for users to /// deal with directly. Most functions which operate on ranges will accept any types which diff --git a/tikv-client-common/src/kv/key.rs b/tikv-client-common/src/kv/key.rs index 5664cffb..424bc78e 100644 --- a/tikv-client-common/src/kv/key.rs +++ b/tikv-client-common/src/kv/key.rs @@ -2,13 +2,13 @@ use super::HexRepr; use crate::kv::codec::{self, BytesEncoder}; -use kvproto::kvrpcpb; #[allow(unused_imports)] #[cfg(test)] use proptest::{arbitrary::any_with, collection::size_range}; #[cfg(test)] use proptest_derive::Arbitrary; use std::{fmt, ops::Bound, u8}; +use tikv_client_proto::kvrpcpb; const _PROPTEST_KEY_MAX: usize = 1024 * 2; // 2 KB diff --git a/tikv-client-common/src/kv/kvpair.rs b/tikv-client-common/src/kv/kvpair.rs index a7fad5b9..2dca90a6 100644 --- a/tikv-client-common/src/kv/kvpair.rs +++ b/tikv-client-common/src/kv/kvpair.rs @@ -1,10 +1,10 @@ // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use super::{HexRepr, Key, Value}; -use kvproto::kvrpcpb; #[cfg(test)] use proptest_derive::Arbitrary; use std::{fmt, str}; +use tikv_client_proto::kvrpcpb; /// A key/value pair.
/// diff --git a/tikv-client-common/src/timestamp.rs b/tikv-client-common/src/timestamp.rs index 87891dc2..34724aab 100644 --- a/tikv-client-common/src/timestamp.rs +++ b/tikv-client-common/src/timestamp.rs @@ -4,8 +4,8 @@ //! The lower 18 (PHYSICAL_SHIFT_BITS) bits are the logical part of the timestamp. //! The higher bits of the version are the physical part of the timestamp. -pub use kvproto::pdpb::Timestamp; use std::convert::TryInto; +pub use tikv_client_proto::pdpb::Timestamp; const PHYSICAL_SHIFT_BITS: i64 = 18; const LOGICAL_MASK: i64 = (1 << PHYSICAL_SHIFT_BITS) - 1; diff --git a/tikv-client-pd/Cargo.toml b/tikv-client-pd/Cargo.toml index 688639e8..6354baf1 100644 --- a/tikv-client-pd/Cargo.toml +++ b/tikv-client-pd/Cargo.toml @@ -8,9 +8,9 @@ async-trait = "0.1" derive-new = "0.5" futures = { version = "0.3.5", features = ["compat", "async-await", "thread-pool"] } grpcio = { version = "0.6", features = [ "secure", "prost-codec" ], default-features = false } -kvproto = { git = "https://github.com/pingcap/kvproto.git", features = [ "prost-codec" ], default-features = false } log = "0.4" tikv-client-common = { path = "../tikv-client-common" } +tikv-client-proto = { path = "../tikv-client-proto" } tokio = { version = "0.2", features = ["sync"] } [dev-dependencies] diff --git a/tikv-client-pd/src/cluster.rs b/tikv-client-pd/src/cluster.rs index 24b49768..2bc8a377 100644 --- a/tikv-client-pd/src/cluster.rs +++ b/tikv-client-pd/src/cluster.rs @@ -3,13 +3,13 @@ use crate::{timestamp::TimestampOracle, Error, Result, SecurityManager}; use async_trait::async_trait; use grpcio::{CallOption, Environment}; -use kvproto::pdpb::{self, Timestamp}; use std::{ collections::HashSet, sync::Arc, time::{Duration, Instant}, }; use tikv_client_common::internal_err; +use tikv_client_proto::pdpb::{self, Timestamp}; /// A PD cluster. pub struct Cluster { @@ -22,7 +22,7 @@ pub struct Cluster { macro_rules! pd_request { ($cluster_id:expr, $type:ty) => {{ let mut request = <$type>::default(); - let mut header = ::kvproto::pdpb::RequestHeader::default(); + let mut header = ::tikv_client_proto::pdpb::RequestHeader::default(); header.set_cluster_id($cluster_id); request.set_header(header); request diff --git a/tikv-client-pd/src/timestamp.rs b/tikv-client-pd/src/timestamp.rs index 0a5c426c..84577651 100644 --- a/tikv-client-pd/src/timestamp.rs +++ b/tikv-client-pd/src/timestamp.rs @@ -20,8 +20,8 @@ use futures::{ task::{AtomicWaker, Context, Poll}, }; use grpcio::WriteFlags; -use kvproto::pdpb::*; use std::{cell::RefCell, collections::VecDeque, pin::Pin, rc::Rc, thread}; +use tikv_client_proto::pdpb::*; /// It is an empirical value. 
const MAX_BATCH_SIZE: usize = 64; @@ -152,8 +152,12 @@ impl<'a> Stream for TsoRequestStream<'a> { let req = TsoRequest { header: Some(RequestHeader { cluster_id: self.cluster_id, + // TODO + sender_id: 0, }), count: requests.len() as u32, + // TODO + dc_location: String::new(), }; let request_group = RequestGroup { diff --git a/tikv-client-proto/Cargo.toml b/tikv-client-proto/Cargo.toml new file mode 100644 index 00000000..03cccb3c --- /dev/null +++ b/tikv-client-proto/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "tikv-client-proto" +version = "0.0.0" +edition = "2018" +build = "build.rs" + +[build-dependencies] +protobuf-build = { version = "0.11", default-features = false, features = ["grpcio-prost-codec"] } + +[dependencies] +protobuf = "=2.8.0" +prost = { version = "0.6" } +prost-derive = { version = "0.6" } +futures = "0.3.5" +grpcio = { version = "0.6.0", default-features = false, features = ["secure", "prost-codec"] } +lazy_static = { version = "1.3" } diff --git a/tikv-client-proto/README.md b/tikv-client-proto/README.md new file mode 100644 index 00000000..c38b0560 --- /dev/null +++ b/tikv-client-proto/README.md @@ -0,0 +1,7 @@ +# TiKV client protobuf definitions + +This crate builds Rust protobufs required by the TiKV client. + +The protobuf definitions are in proto and include. These are copied from the [kvproto repo](https://github.com/pingcap/kvproto). They are copied because the kvproto crate is difficult to publish. + +To update the protos, copy them all from that repo. They will be rebuilt automatically when you build the client (or this crate). diff --git a/tikv-client-proto/build.rs b/tikv-client-proto/build.rs new file mode 100644 index 00000000..99e0e9b2 --- /dev/null +++ b/tikv-client-proto/build.rs @@ -0,0 +1,10 @@ +// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. + +use protobuf_build::Builder; + +fn main() { + Builder::new() + .search_dir_for_protos("./proto") + .includes(&["./include", "./proto"]) + .generate() +} diff --git a/tikv-client-proto/include/eraftpb.proto b/tikv-client-proto/include/eraftpb.proto new file mode 100644 index 00000000..daf1c629 --- /dev/null +++ b/tikv-client-proto/include/eraftpb.proto @@ -0,0 +1,181 @@ +syntax = "proto3"; +package eraftpb; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; + EntryConfChangeV2 = 2; +} + +// The entry is a type of change that needs to be applied. It contains two data fields. +// While the fields are built into the model; their usage is determined by the entry_type. +// +// For normal entries, the data field should contain the data change that should be applied. +// The context field can be used for any contextual data that might be relevant to the +// application of the data. +// +// For configuration changes, the data will contain the ConfChange message and the +// context will provide anything needed to assist the configuration change. The context +// if for the user to set and use in this case. +message Entry { + EntryType entry_type = 1; + uint64 term = 2; + uint64 index = 3; + bytes data = 4; + bytes context = 6; + + // Deprecated! It is kept for backward compatibility. + // TODO: remove it in the next major release. + bool sync_log = 5; +} + +message SnapshotMetadata { + // The current `ConfState`. + ConfState conf_state = 1; + // The applied index. + uint64 index = 2; + // The term of the applied index. 
+ uint64 term = 3; +} + +message Snapshot { + bytes data = 1; + SnapshotMetadata metadata = 2; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgPropose = 2; + MsgAppend = 3; + MsgAppendResponse = 4; + MsgRequestVote = 5; + MsgRequestVoteResponse = 6; + MsgSnapshot = 7; + MsgHeartbeat = 8; + MsgHeartbeatResponse = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgRequestPreVote = 17; + MsgRequestPreVoteResponse = 18; +} + +message Message { + MessageType msg_type = 1; + uint64 to = 2; + uint64 from = 3; + uint64 term = 4; + uint64 log_term = 5; + uint64 index = 6; + repeated Entry entries = 7; + uint64 commit = 8; + Snapshot snapshot = 9; + uint64 request_snapshot = 13; + bool reject = 10; + uint64 reject_hint = 11; + bytes context = 12; + uint64 priority = 14; +} + +message HardState { + uint64 term = 1; + uint64 vote = 2; + uint64 commit = 3; +} + +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeType::Implicit. Most applications will want to use this. + Auto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + Implicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + Explicit = 2; +} + +message ConfState { + repeated uint64 voters = 1; + repeated uint64 learners = 2; + + // The voters in the outgoing config. If not empty the node is in joint consensus. + repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + bool auto_leave = 5; +} + +enum ConfChangeType { + AddNode = 0; + RemoveNode = 1; + AddLearnerNode = 2; +} + +message ConfChange { + ConfChangeType change_type = 2; + uint64 node_id = 3; + bytes context = 4; + + uint64 id = 1; +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + ConfChangeType change_type = 1; + uint64 node_id = 2; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). 
+// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + ConfChangeTransition transition = 1; + repeated ConfChangeSingle changes = 2; + bytes context = 3; +} diff --git a/tikv-client-proto/include/gogoproto/gogo.proto b/tikv-client-proto/include/gogoproto/gogo.proto new file mode 100644 index 00000000..bc8d889f --- /dev/null +++ b/tikv-client-proto/include/gogoproto/gogo.proto @@ -0,0 +1,136 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; +} diff --git a/tikv-client-proto/include/google/api/annotations.proto b/tikv-client-proto/include/google/api/annotations.proto new file mode 100644 index 00000000..85c361b4 --- /dev/null +++ b/tikv-client-proto/include/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 
(c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/tikv-client-proto/include/google/api/http.proto b/tikv-client-proto/include/google/api/http.proto new file mode 100644 index 00000000..b2977f51 --- /dev/null +++ b/tikv-client-proto/include/google/api/http.proto @@ -0,0 +1,376 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. 
Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A&param=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping.
Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. 
+// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// ## Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. 
+// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/tikv-client-proto/include/google/protobuf/any.proto b/tikv-client-proto/include/google/protobuf/any.proto new file mode 100644 index 00000000..b6cc7cb2 --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/any.proto @@ -0,0 +1,154 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/tikv-client-proto/include/google/protobuf/api.proto b/tikv-client-proto/include/google/protobuf/api.proto new file mode 100644 index 00000000..67c1ddbd --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/api.proto @@ -0,0 +1,210 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/source_context.proto"; +import "google/protobuf/type.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "ApiProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +message Api { + + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + string name = 1; + + // The methods of this interface, in unspecified order. + repeated Method methods = 2; + + // Any metadata attached to the interface. + repeated Option options = 3; + + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v<major-version>`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + string version = 4; + + // Source context for the protocol buffer service represented by this + // message. + SourceContext source_context = 5; + + // Included interfaces. See [Mixin][]. + repeated Mixin mixins = 6; + + // The source syntax of the service. + Syntax syntax = 7; +} + +// Method represents a method of an API interface. +message Method { + + // The simple name of this method. + string name = 1; + + // A URL of the input message type. + string request_type_url = 2; + + // If true, the request is streamed. + bool request_streaming = 3; + + // The URL of the output message type.
+ string response_type_url = 4; + + // If true, the response is streamed. + bool response_streaming = 5; + + // Any metadata attached to the method. + repeated Option options = 6; + + // The source syntax of this method. + Syntax syntax = 7; +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +message Mixin { + // The fully qualified name of the interface which is included. + string name = 1; + + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + string root = 2; +} diff --git a/tikv-client-proto/include/google/protobuf/compiler/plugin.proto b/tikv-client-proto/include/google/protobuf/compiler/plugin.proto new file mode 100644 index 00000000..e85c852f --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/compiler/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. 
+ // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. 
Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/tikv-client-proto/include/google/protobuf/descriptor.proto b/tikv-client-proto/include/google/protobuf/descriptor.proto new file mode 100644 index 00000000..1598ad7c --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/descriptor.proto @@ -0,0 +1,872 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). 
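For reference, the plugin protocol described in plugin.proto above (read an encoded CodeGeneratorRequest from stdin, write an encoded CodeGeneratorResponse to stdout) fits in a few lines of Rust. This is a minimal sketch assuming the prost and prost-types crates used elsewhere in this workspace expose the plugin messages under prost_types::compiler; the generated file names and contents are placeholders:

    use prost::Message;
    use prost_types::compiler::{code_generator_response, CodeGeneratorRequest, CodeGeneratorResponse};
    use std::io::{self, Read, Write};

    fn main() -> io::Result<()> {
        // protoc writes the encoded CodeGeneratorRequest to the plugin's stdin.
        let mut buf = Vec::new();
        io::stdin().read_to_end(&mut buf)?;
        let request = CodeGeneratorRequest::decode(&buf[..]).expect("invalid CodeGeneratorRequest");

        // Emit one placeholder output file per .proto listed on the command line.
        let mut response = CodeGeneratorResponse::default();
        for proto in &request.file_to_generate {
            response.file.push(code_generator_response::File {
                name: Some(format!("{}.txt", proto)),
                content: Some(format!("// generated from {}\n", proto)),
                ..Default::default()
            });
        }

        // protoc reads the encoded CodeGeneratorResponse from the plugin's stdout.
        let mut out = Vec::new();
        response.encode(&mut out).expect("failed to encode CodeGeneratorResponse");
        io::stdout().write_all(&out)
    }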
+ + +syntax = "proto2"; + +package google.protobuf; +option go_package = "descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). 
+ optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' 
with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + //reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + //reserved 8; // javalite_serializable + //reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. + STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. 
+ optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + //reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + //reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. 
+ optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. 
This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. 
+ // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/tikv-client-proto/include/google/protobuf/duration.proto b/tikv-client-proto/include/google/protobuf/duration.proto new file mode 100644 index 00000000..8bbaa8b6 --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. 
For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/tikv-client-proto/include/google/protobuf/empty.proto b/tikv-client-proto/include/google/protobuf/empty.proto new file mode 100644 index 00000000..6057c852 --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/empty.proto @@ -0,0 +1,52 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/tikv-client-proto/include/google/protobuf/field_mask.proto b/tikv-client-proto/include/google/protobuf/field_mask.proto new file mode 100644 index 00000000..12161981 --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/field_mask.proto @@ -0,0 +1,252 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. 
In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. 
+// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of the all the API methods, which have any FieldMask type +// field in the request, should verify the included field paths, and return +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/tikv-client-proto/include/google/protobuf/source_context.proto b/tikv-client-proto/include/google/protobuf/source_context.proto new file mode 100644 index 00000000..8654578c --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/source_context.proto @@ -0,0 +1,48 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "SourceContextProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +message SourceContext { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + string file_name = 1; +} diff --git a/tikv-client-proto/include/google/protobuf/struct.proto b/tikv-client-proto/include/google/protobuf/struct.proto new file mode 100644 index 00000000..4f78641f --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. 
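+  // For example, the JSON object
+  //
+  //     {"region": "us-west", "replicas": 3}
+  //
+  // maps to a `Struct` whose `fields` associate "region" with a `string_value`
+  // and "replicas" with a `number_value`.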
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/tikv-client-proto/include/google/protobuf/timestamp.proto b/tikv-client-proto/include/google/protobuf/timestamp.proto
new file mode 100644
index 00000000..150468b5
--- /dev/null
+++ b/tikv-client-proto/include/google/protobuf/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/tikv-client-proto/include/google/protobuf/type.proto b/tikv-client-proto/include/google/protobuf/type.proto new file mode 100644 index 00000000..fcd15bfd --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/type.proto @@ -0,0 +1,187 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "google/protobuf/any.proto"; +import "google/protobuf/source_context.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TypeProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option go_package = "types"; + +// A protocol buffer message type. +message Type { + // The fully qualified message name. + string name = 1; + // The list of fields. + repeated Field fields = 2; + // The list of types appearing in `oneof` definitions in this type. 
+ repeated string oneofs = 3; + // The protocol buffer options. + repeated Option options = 4; + // The source context. + SourceContext source_context = 5; + // The source syntax. + Syntax syntax = 6; +} + +// A single field of a message type. +message Field { + // Basic field types. + enum Kind { + // Field type unknown. + TYPE_UNKNOWN = 0; + // Field type double. + TYPE_DOUBLE = 1; + // Field type float. + TYPE_FLOAT = 2; + // Field type int64. + TYPE_INT64 = 3; + // Field type uint64. + TYPE_UINT64 = 4; + // Field type int32. + TYPE_INT32 = 5; + // Field type fixed64. + TYPE_FIXED64 = 6; + // Field type fixed32. + TYPE_FIXED32 = 7; + // Field type bool. + TYPE_BOOL = 8; + // Field type string. + TYPE_STRING = 9; + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP = 10; + // Field type message. + TYPE_MESSAGE = 11; + // Field type bytes. + TYPE_BYTES = 12; + // Field type uint32. + TYPE_UINT32 = 13; + // Field type enum. + TYPE_ENUM = 14; + // Field type sfixed32. + TYPE_SFIXED32 = 15; + // Field type sfixed64. + TYPE_SFIXED64 = 16; + // Field type sint32. + TYPE_SINT32 = 17; + // Field type sint64. + TYPE_SINT64 = 18; + }; + + // Whether a field is optional, required, or repeated. + enum Cardinality { + // For fields with unknown cardinality. + CARDINALITY_UNKNOWN = 0; + // For optional fields. + CARDINALITY_OPTIONAL = 1; + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED = 2; + // For repeated fields. + CARDINALITY_REPEATED = 3; + }; + + // The field type. + Kind kind = 1; + // The field cardinality. + Cardinality cardinality = 2; + // The field number. + int32 number = 3; + // The field name. + string name = 4; + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + string type_url = 6; + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + int32 oneof_index = 7; + // Whether to use alternative packed wire representation. + bool packed = 8; + // The protocol buffer options. + repeated Option options = 9; + // The field JSON name. + string json_name = 10; + // The string value of the default value of this field. Proto2 syntax only. + string default_value = 11; +} + +// Enum type definition. +message Enum { + // Enum type name. + string name = 1; + // Enum value definitions. + repeated EnumValue enumvalue = 2; + // Protocol buffer options. + repeated Option options = 3; + // The source context. + SourceContext source_context = 4; + // The source syntax. + Syntax syntax = 5; +} + +// Enum value definition. +message EnumValue { + // Enum value name. + string name = 1; + // Enum value number. + int32 number = 2; + // Protocol buffer options. + repeated Option options = 3; +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +message Option { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + string name = 1; + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
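+  // For example, the built-in `map_entry` option mentioned above is a bool, so
+  // its `value` would be a packed `google.protobuf.BoolValue`.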
+ Any value = 2; +} + +// The syntax in which a protocol buffer element is defined. +enum Syntax { + // Syntax `proto2`. + SYNTAX_PROTO2 = 0; + // Syntax `proto3`. + SYNTAX_PROTO3 = 1; +} diff --git a/tikv-client-proto/include/google/protobuf/wrappers.proto b/tikv-client-proto/include/google/protobuf/wrappers.proto new file mode 100644 index 00000000..c5632e5c --- /dev/null +++ b/tikv-client-proto/include/google/protobuf/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. 
+ int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/tikv-client-proto/include/rustproto.proto b/tikv-client-proto/include/rustproto.proto new file mode 100644 index 00000000..83e76fdf --- /dev/null +++ b/tikv-client-proto/include/rustproto.proto @@ -0,0 +1,47 @@ +syntax = "proto2"; + +import "google/protobuf/descriptor.proto"; + +// see https://github.com/gogo/protobuf/blob/master/gogoproto/gogo.proto +// for the original idea + +package rustproto; + +extend google.protobuf.FileOptions { + // When true, oneof field is generated public + optional bool expose_oneof_all = 17001; + // When true all fields are public, and not accessors generated + optional bool expose_fields_all = 17003; + // When false, `get_`, `set_`, `mut_` etc. accessors are not generated + optional bool generate_accessors_all = 17004; + // Use `bytes::Bytes` for `bytes` fields + optional bool carllerche_bytes_for_bytes_all = 17011; + // Use `bytes::Bytes` for `string` fields + optional bool carllerche_bytes_for_string_all = 17012; + // When true, will only generate codes that works with lite runtime. + optional bool lite_runtime_all = 17035; +} + +extend google.protobuf.MessageOptions { + // When true, oneof field is generated public + optional bool expose_oneof = 17001; + // When true all fields are public, and not accessors generated + optional bool expose_fields = 17003; + // When false, `get_`, `set_`, `mut_` etc. accessors are not generated + optional bool generate_accessors = 17004; + // Use `bytes::Bytes` for `bytes` fields + optional bool carllerche_bytes_for_bytes = 17011; + // Use `bytes::Bytes` for `string` fields + optional bool carllerche_bytes_for_string = 17012; +} + +extend google.protobuf.FieldOptions { + // When true all fields are public, and not accessors generated + optional bool expose_fields_field = 17003; + // When false, `get_`, `set_`, `mut_` etc. accessors are not generated + optional bool generate_accessors_field = 17004; + // Use `bytes::Bytes` for `bytes` fields + optional bool carllerche_bytes_for_bytes_field = 17011; + // Use `bytes::Bytes` for `string` fields + optional bool carllerche_bytes_for_string_field = 17012; +} \ No newline at end of file diff --git a/tikv-client-proto/proto/backup.proto b/tikv-client-proto/proto/backup.proto new file mode 100644 index 00000000..7efe6c70 --- /dev/null +++ b/tikv-client-proto/proto/backup.proto @@ -0,0 +1,192 @@ +syntax = "proto3"; +package backup; + +import "kvrpcpb.proto"; +import "errorpb.proto"; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// The message save the metadata of a backup. +message BackupMeta { + // ID and version of backuped cluster. 
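+  // For example, cluster_version might hold a release string such as "4.0.0"
+  // (illustrative value).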
+ uint64 cluster_id = 1; + string cluster_version = 2; + + // path field is no longer used. + reserved 3; reserved "path"; + // A set of files that compose a backup. + repeated File files = 4; + + // A pair of timestamp specifies a time range of a backup. + // For full backup, the start_version equals to the end_version, + // it means point in time. + // For incremental backup, the time range is specified as + // (start_version, end_version]. + uint64 start_version = 5; + uint64 end_version = 6; + + // Additional metadata describes database and table info. + repeated Schema schemas = 7; + + // If in raw kv mode, `start_versions`, `end_versions` and `schemas` will be ignored, and the + // backup data's range is represented by raw_ranges. + bool is_raw_kv = 8; + repeated RawRange raw_ranges = 9; + + // In incremental backup, DDLs which are completed in (lastBackupTS, backupTS] will be stored here. + bytes ddls = 10; +} + +message File { + string name = 1; + bytes sha256 = 2; + + bytes start_key = 3; + bytes end_key = 4; + uint64 start_version = 5; + uint64 end_version = 6; + + uint64 crc64xor = 7; + uint64 total_kvs = 8; + uint64 total_bytes = 9; + + string cf = 10; + + uint64 size = 11; +} + +message Schema { + bytes db = 1; + bytes table = 2; + + uint64 crc64xor = 3; + uint64 total_kvs = 4; + uint64 total_bytes = 5; + + uint32 tiflash_replicas = 6; +} + +message RawRange { + bytes start_key = 1; + bytes end_key = 2; + string cf = 3; +} + +message ClusterIDError { + uint64 current = 1; + uint64 request = 2; +} + +message Error { + string msg = 1; + oneof detail { + ClusterIDError cluster_id_error = 3; + kvrpcpb.KeyError kv_error = 4; + errorpb.Error region_error = 5; + } +} + +// sst files compression algorithm +enum CompressionType { + UNKNOWN = 0; + LZ4 = 1; + SNAPPY = 2; + ZSTD = 3; +} + +message BackupRequest { + uint64 cluster_id = 1; + + bytes start_key = 2; + bytes end_key = 3; + uint64 start_version = 4; + uint64 end_version = 5; + + // path field is deprecated, use storage_backend instead + reserved 6; reserved "path"; + + // The I/O rate limit for backup request. + uint64 rate_limit = 7; + // The concurrency for executing the backup request in every tikv node. + uint32 concurrency = 8; + + StorageBackend storage_backend = 9; + + // If raw kv mode is enabled, `start_version` and `end_version` will be ignored, and `cf` + // specifies which cf to backup. + bool is_raw_kv = 10; + string cf = 11; + // algorithm used for compress sst files + CompressionType compression_type = 12; + // sst compression level, some algorithms support negative compression levels + int32 compression_level = 13; +} + +message StorageBackend { + oneof backend { + Noop noop = 1; + Local local = 2; + S3 s3 = 3; + GCS gcs = 4; + } +} + +// Noop storage backend saves files into void. +message Noop {} + +// Local storage backend saves files into local disk +message Local { + string path = 1; +} + +// S3 storage backend saves files into S3 compatible storages +// For non-aws providers, endpoint must be provided +message S3 { + string endpoint = 1; + string region = 2; + string bucket = 3; + string prefix = 4; + string storage_class = 5; + // server side encryption + string sse = 6; + string acl = 7; + string access_key = 8; + string secret_access_key = 9; + bool force_path_style = 10; + string sse_kms_key_id = 11; +} + +// GCS storage backend saves files into google cloud storage. 
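+// For example, a backup written under gs://my-bucket/backup-20200801 would set
+// `bucket` to "my-bucket" and `prefix` to "backup-20200801" (illustrative
+// values); `credentials_blob` carries the service account JSON described below.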
+message GCS { + string endpoint = 1; + string bucket = 2; + string prefix = 3; + string storage_class = 4; + // If not empty, applies a predefined set of access controls. + // See https://cloud.google.com/storage/docs/access-control/lists#predefined-acl + // for valid values. + string predefined_acl = 5; + // Service Account Credentials JSON blob + // You can get one from https://console.cloud.google.com/apis/credentials, and + // copy the content, set it as string here. + string credentials_blob = 6; +} + +message BackupResponse { + Error error = 1; + bytes start_key = 2; + bytes end_key = 3; + repeated File files = 4; +} + +service Backup { + rpc backup(BackupRequest) returns (stream BackupResponse) {} +} diff --git a/tikv-client-proto/proto/cdcpb.proto b/tikv-client-proto/proto/cdcpb.proto new file mode 100644 index 00000000..0a094eb7 --- /dev/null +++ b/tikv-client-proto/proto/cdcpb.proto @@ -0,0 +1,147 @@ +syntax = "proto3"; +package cdcpb; + +import "raft_cmdpb.proto"; +import "metapb.proto"; +import "errorpb.proto"; +import "kvrpcpb.proto"; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option(gogoproto.sizer_all) = true; +option(gogoproto.marshaler_all) = true; +option(gogoproto.unmarshaler_all) = true; +option(rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +message Header { + uint64 cluster_id = 1; + string ticdc_version = 2; +} + +message DuplicateRequest { + uint64 region_id = 1; +} + +message Compatibility { + string required_version = 1; +} + +message Error { + errorpb.NotLeader not_leader = 1; + errorpb.RegionNotFound region_not_found = 2; + errorpb.EpochNotMatch epoch_not_match = 3; + DuplicateRequest duplicate_request = 4; + Compatibility compatibility = 5; +} + +message TxnInfo { + uint64 start_ts = 1; + bytes primary = 2; +} + +message TxnStatus { + uint64 start_ts = 1; + uint64 min_commit_ts = 2; + uint64 commit_ts = 3; + bool is_rolled_back = 4; +} + +message Event { + enum LogType { + UNKNOWN = 0; + PREWRITE = 1; + COMMIT = 2; + ROLLBACK = 3; + COMMITTED = 4; + INITIALIZED = 5; + } + + message Row { + uint64 start_ts = 1; + uint64 commit_ts = 2; + LogType type = 3; + + enum OpType { + UNKNOWN = 0; + PUT = 1; + DELETE = 2; + } + OpType op_type = 4; + bytes key = 5; + bytes value = 6; + bytes old_value = 7; + } + + message Entries { + repeated Row entries = 1; + } + + message Admin { + raft_cmdpb.AdminRequest admin_request = 1; + raft_cmdpb.AdminResponse admin_response = 2; + } + + message LongTxn { + repeated TxnInfo txn_info = 1; + } + + uint64 region_id = 1; + uint64 index = 2; + uint64 request_id = 7; + oneof event { + Entries entries = 3; + Admin admin = 4; + Error error = 5; + uint64 resolved_ts = 6 [deprecated=true]; + // Note that field 7 is taken by request_id. + LongTxn long_txn = 8; + // More region level events ... + } +} + +message ChangeDataEvent { + repeated Event events = 1; + ResolvedTs resolved_ts = 2; + // More store level events ... +} + +message ResolvedTs { + repeated uint64 regions = 1; + uint64 ts = 2; +} + +message ChangeDataRequest { + message Register {} + + message NotifyTxnStatus { + repeated TxnStatus txn_status = 1; + } + + Header header = 1; + uint64 region_id = 2; + metapb.RegionEpoch region_epoch = 3; + + uint64 checkpoint_ts = 4; + bytes start_key = 5; + bytes end_key = 6; + // Used for CDC to identify events corresponding to different requests. 
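+  // For example, a client that opens two change data feeds on the same region
+  // can tell their events apart by sending request_id 1 and 2 respectively
+  // (illustrative values).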
+ uint64 request_id = 7; + + kvrpcpb.ExtraOp extra_op = 8; + + oneof request { + // A normal request that trying to register change data feed on a region. + Register register = 9; + + // Notify the region that some of the running transactions on the region has a pushed + // min_commit_ts so that the resolved_ts can be advanced. + NotifyTxnStatus notify_txn_status = 10; + } +} + +service ChangeData { + rpc EventFeed(stream ChangeDataRequest) returns(stream ChangeDataEvent); +} diff --git a/tikv-client-proto/proto/configpb.proto b/tikv-client-proto/proto/configpb.proto new file mode 100644 index 00000000..0276328c --- /dev/null +++ b/tikv-client-proto/proto/configpb.proto @@ -0,0 +1,153 @@ +syntax = "proto3"; +package configpb; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; +import "google/api/annotations.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +service Config { + rpc Create(CreateRequest) returns (CreateResponse) {} + rpc GetAll(GetAllRequest) returns (GetAllResponse) {} + rpc Get(GetRequest) returns (GetResponse) { + option (google.api.http) = { + get: "/component" + }; + } + rpc Update(UpdateRequest) returns (UpdateResponse) { + option (google.api.http) = { + post: "/component" + body: "*" + }; + } + rpc Delete(DeleteRequest) returns (DeleteResponse) { + option (google.api.http) = { + delete: "/component" + }; + } +} + +enum StatusCode { + UNKNOWN = 0; + OK = 1; + WRONG_VERSION = 2; + NOT_CHANGE = 3; + COMPONENT_NOT_FOUND = 4; + COMPONENT_ID_NOT_FOUND = 5; +} + +message Status { + StatusCode code = 1; + string message = 2; +} + +// The version is used to tell the configuration which can be shared +// or not apart. +// Global version represents the version of these configuration +// which can be shared, each kind of component only have one. +// For local version, every component will have one to represent +// the version of these configuration which cannot be shared. 
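+// For example, a Version of {local: 2, global: 5} means the component's
+// non-shareable configuration is at local version 2 while the shared
+// configuration it follows is at global version 5 (illustrative values).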
+message Version { + uint64 local = 1; + uint64 global = 2; +} + +message Local { + string component_id = 1; +} + +message Global { + string component = 1; +} + +message ConfigKind { + oneof kind { + Local local = 1; + Global global = 2; + } +} + +message ConfigEntry { + string name = 1; + string value = 2; +} + +message LocalConfig { + Version version = 1; + string component = 2; + string component_id = 3; + string config = 4; +} + +message Header { + uint64 cluster_id = 1; +} + +message CreateRequest { + Header header = 1; + Version version = 2; + string component = 3; + string component_id = 4; + string config = 5; +} + +message CreateResponse { + Header header = 1; + Status status = 2; + Version version = 3; + string config = 4; +} + +message GetAllRequest { + Header header = 1; +} + +message GetAllResponse { + Header header = 1; + Status status = 2; + repeated LocalConfig local_configs = 3; +} + +message GetRequest { + Header header = 1; + Version version = 2; + string component = 3; + string component_id = 4; +} + +message GetResponse { + Header header = 1; + Status status = 2; + Version version = 3; + string config = 4; +} + +message UpdateRequest { + Header header = 1; + Version version = 2; + ConfigKind kind = 3; + repeated ConfigEntry entries = 4; +} + +message UpdateResponse { + Header header = 1; + Status status = 2; + Version version = 3; + string config = 4; +} + +message DeleteRequest { + Header header = 1; + Version version = 2; + ConfigKind kind = 3; +} + +message DeleteResponse { + Header header = 1; + Status status = 2; + Version version = 3; +} diff --git a/tikv-client-proto/proto/coprocessor.proto b/tikv-client-proto/proto/coprocessor.proto new file mode 100644 index 00000000..ee6b30ef --- /dev/null +++ b/tikv-client-proto/proto/coprocessor.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package coprocessor; + +import "errorpb.proto"; +import "kvrpcpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; +import "metapb.proto"; +import "span.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + + +// [start, end) +message KeyRange { + bytes start = 1; + bytes end = 2; +} + +message Request { + kvrpcpb.Context context = 1; + int64 tp = 2; + bytes data = 3; + uint64 start_ts = 7; + repeated KeyRange ranges = 4; + + // If cache is enabled, TiKV returns cache hit instead of data if + // its last version matches this `cache_if_match_version`. + bool is_cache_enabled = 5; + uint64 cache_if_match_version = 6; + // Any schema-ful storage to validate schema correctness if necessary. + int64 schema_ver = 8; + bool is_trace_enabled = 9; +} + +message Response { + bytes data = 1 [(gogoproto.customtype) = "github.com/pingcap/kvproto/pkg/sharedbytes.SharedBytes", (gogoproto.nullable) = false]; + errorpb.Error region_error = 2; + kvrpcpb.LockInfo locked = 3; + string other_error = 4; + KeyRange range = 5; + kvrpcpb.ExecDetails exec_details = 6; + + bool is_cache_hit = 7; + uint64 cache_last_version = 8; + bool can_be_cached = 9; + repeated span.SpanSet spans = 10; +} + +message RegionInfo { + uint64 region_id = 1; + metapb.RegionEpoch region_epoch = 2; + repeated KeyRange ranges = 3; +} + +message BatchRequest { + kvrpcpb.Context context = 1; + int64 tp = 2; + bytes data = 3; + repeated RegionInfo regions = 4; + uint64 start_ts = 5; + // Any schema-ful storage to validate schema correctness if necessary. 
+ int64 schema_ver = 6; +} + +message BatchResponse { + bytes data = 1 [(gogoproto.customtype) = "github.com/pingcap/kvproto/pkg/sharedbytes.SharedBytes", (gogoproto.nullable) = false]; + string other_error = 2; + kvrpcpb.ExecDetails exec_details = 3; +} diff --git a/tikv-client-proto/proto/deadlock.proto b/tikv-client-proto/proto/deadlock.proto new file mode 100644 index 00000000..58296165 --- /dev/null +++ b/tikv-client-proto/proto/deadlock.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package deadlock; + +import "gogoproto/gogo.proto"; + +message WaitForEntriesRequest { +} + +message WaitForEntriesResponse { + repeated WaitForEntry entries = 1 [(gogoproto.nullable) = false]; +} + +message WaitForEntry { + // The transaction id that is waiting. + uint64 txn = 1; + // The transaction id that is being waited for. + uint64 wait_for_txn = 2; + // The hash value of the key is being waited for. + uint64 key_hash = 3; +} + +enum DeadlockRequestType { + Detect = 0; + // CleanUpWaitFor cleans a single entry the transaction is waiting. + CleanUpWaitFor = 1; + // CleanUp cleans all entries the transaction is waiting. + CleanUp = 2; +} + +message DeadlockRequest { + DeadlockRequestType tp = 1; + WaitForEntry entry = 2 [(gogoproto.nullable) = false]; +} + +message DeadlockResponse { + // The same entry sent by DeadlockRequest, identifies the sender. + WaitForEntry entry = 1 [(gogoproto.nullable) = false]; + // The key hash of the lock that is hold by the waiting transaction. + uint64 deadlock_key_hash = 2; +} + +service Deadlock { + // Get local wait for entries, should be handle by every node. + // The owner should sent this request to all members to build the complete wait for graph. + rpc GetWaitForEntries(WaitForEntriesRequest) returns (WaitForEntriesResponse) {} + + // Detect should only sent to the owner. only be handled by the owner. + // The DeadlockResponse is sent back only if there is deadlock detected. + // CleanUpWaitFor and CleanUp doesn't return responses. + rpc Detect(stream DeadlockRequest) returns (stream DeadlockResponse) {} +} diff --git a/tikv-client-proto/proto/debugpb.proto b/tikv-client-proto/proto/debugpb.proto new file mode 100644 index 00000000..db5449c9 --- /dev/null +++ b/tikv-client-proto/proto/debugpb.proto @@ -0,0 +1,250 @@ +syntax = "proto3"; +package debugpb; + +import "eraftpb.proto"; +import "kvrpcpb.proto"; +import "raft_serverpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// Debug service for TiKV. +// +// Errors are defined as follow: +// - OK: Okay, we are good! +// - UNKNOWN: For unknown error. +// - INVALID_ARGUMENT: Something goes wrong within requests. +// - NOT_FOUND: It is key or region not found, it's based on context, detailed +// reason can be found in grpc message. +// Note: It bypasses raft layer. +service Debug { + // Read a value arbitrarily for a key. + // Note: Server uses key directly w/o any encoding. + rpc Get(GetRequest) returns (GetResponse) {} + + // Read raft info. + rpc RaftLog(RaftLogRequest) returns (RaftLogResponse) {} + rpc RegionInfo(RegionInfoRequest) returns (RegionInfoResponse) {} + + // Calculate size of a region. + // Note: DO NOT CALL IT IN PRODUCTION, it's really expensive. + rpc RegionSize(RegionSizeRequest) returns (RegionSizeResponse) {} + + // Scan a specific range. 
+ // Note: DO NOT CALL IT IN PRODUCTION, it's really expensive. + // Server uses keys directly w/o any encoding. + rpc ScanMvcc(ScanMvccRequest) returns (stream ScanMvccResponse) {} + + // Compact a column family in a specified range. + // Note: Server uses keys directly w/o any encoding. + rpc Compact(CompactRequest) returns (CompactResponse) {} + + // Inject a fail point. Currently, it's only used in tests. + // Note: DO NOT CALL IT IN PRODUCTION. + rpc InjectFailPoint(InjectFailPointRequest) returns (InjectFailPointResponse) {} + // Recover from a fail point. + rpc RecoverFailPoint(RecoverFailPointRequest) returns (RecoverFailPointResponse) {} + // List all fail points. + rpc ListFailPoints(ListFailPointsRequest) returns (ListFailPointsResponse) {} + + // Get Metrics + rpc GetMetrics(GetMetricsRequest) returns (GetMetricsResponse){} + + // Do a consistent check for a region. + rpc CheckRegionConsistency(RegionConsistencyCheckRequest) returns (RegionConsistencyCheckResponse) {} + + // dynamically modify tikv's config + rpc ModifyTikvConfig(ModifyTikvConfigRequest) returns (ModifyTikvConfigResponse) {} + + // Get region properties + rpc GetRegionProperties(GetRegionPropertiesRequest) returns (GetRegionPropertiesResponse) {} + + // Get store ID + rpc GetStoreInfo(GetStoreInfoRequest) returns (GetStoreInfoResponse) {} + + // Get cluster ID + rpc GetClusterInfo(GetClusterInfoRequest) returns (GetClusterInfoResponse) {} +} + +enum DB { + INVALID = 0; + KV = 1; + RAFT = 2; +} + +enum MODULE { + UNUSED = 0; + KVDB = 1; + RAFTDB = 2; + READPOOL = 3; + SERVER = 4; + STORAGE = 5; + PD = 6; + METRIC = 7; + COPROCESSOR = 8; + SECURITY = 9; + IMPORT = 10; +} + +message GetRequest { + DB db = 1; + string cf = 2; + bytes key = 3; +} + +message GetResponse { + bytes value = 1; +} + +message RaftLogRequest { + uint64 region_id = 1; + uint64 log_index = 2; +} + +message RaftLogResponse { + eraftpb.Entry entry = 1; +} + +message RegionInfoRequest { + uint64 region_id = 1; +} + +message RegionInfoResponse { + raft_serverpb.RaftLocalState raft_local_state = 1; + raft_serverpb.RaftApplyState raft_apply_state = 2; + raft_serverpb.RegionLocalState region_local_state = 3; +} + +message RegionSizeRequest { + uint64 region_id = 1; + repeated string cfs = 2; +} + +message RegionSizeResponse { + message Entry { + string cf = 1; + uint64 size = 2; + } + + repeated Entry entries = 1; +} + +message ScanMvccRequest { + bytes from_key = 1; + bytes to_key = 2; + uint64 limit = 3; +} + +message ScanMvccResponse { + bytes key = 1; + kvrpcpb.MvccInfo info = 2; +} + +enum BottommostLevelCompaction { + // Skip bottommost level compaction + Skip = 0; + // Force bottommost level compaction + Force = 1; + // Compact bottommost level if there is a compaction filter. 
+ IfHaveCompactionFilter = 2; +} + +message CompactRequest { + DB db = 1; + string cf = 2; + bytes from_key = 3; + bytes to_key = 4; + uint32 threads = 5; + BottommostLevelCompaction bottommost_level_compaction = 6; +} + +message CompactResponse { +} + +message InjectFailPointRequest { + string name = 1; + string actions = 2; +} + +message InjectFailPointResponse { +} + +message RecoverFailPointRequest { + string name = 1; +} + +message RecoverFailPointResponse { +} + +message ListFailPointsRequest { +} + +message ListFailPointsResponse { + message Entry { + string name = 1; + string actions = 2; + } + + repeated Entry entries = 1; +} + +message GetMetricsRequest { + bool all = 1; +} + +message GetMetricsResponse { + string prometheus = 1; + string rocksdb_kv = 2; + string rocksdb_raft = 3; + string jemalloc = 4; + uint64 store_id = 5; +} + +message RegionConsistencyCheckRequest { + uint64 region_id = 1; +} + +message RegionConsistencyCheckResponse { +} + +message ModifyTikvConfigRequest { + MODULE module = 1; + string config_name = 2; + string config_value = 3; +} + +message ModifyTikvConfigResponse { +} + +message Property { + string name = 1; + string value = 2; +} + +message GetRegionPropertiesRequest { + uint64 region_id = 1; +} + +message GetRegionPropertiesResponse { + repeated Property props = 1; +} + +message GetStoreInfoRequest { +} + +message GetStoreInfoResponse { + uint64 store_id = 1; +} + +message GetClusterInfoRequest { +} + +message GetClusterInfoResponse { + uint64 cluster_id = 1; +} diff --git a/tikv-client-proto/proto/diagnosticspb.proto b/tikv-client-proto/proto/diagnosticspb.proto new file mode 100644 index 00000000..548b63ad --- /dev/null +++ b/tikv-client-proto/proto/diagnosticspb.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; +package diagnosticspb; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// Diagnostics service for TiDB cluster components. +service Diagnostics { + // Searchs log in the target node + rpc search_log(SearchLogRequest) returns (stream SearchLogResponse) {}; + // Retrieves server info in the target node + rpc server_info(ServerInfoRequest) returns (ServerInfoResponse) {}; +} + +enum LogLevel { + UNKNOWN = 0; + Debug = 1; + Info = 2; + Warn = 3; + Trace = 4; + Critical = 5; + Error = 6; +} + +message SearchLogRequest { + enum Target { + Normal = 0; + Slow = 1; + } + int64 start_time = 1; + int64 end_time = 2; + repeated LogLevel levels = 3; + // We use a string array to represent multiple CNF pattern sceniaor like: + // SELECT * FROM t WHERE c LIKE '%s%' and c REGEXP '.*a.*' because + // Golang and Rust don't support perl-like (?=re1)(?=re2) + repeated string patterns = 4; + Target target = 5; +} + +message SearchLogResponse { + repeated LogMessage messages = 1; +} + +message LogMessage { + int64 time = 1; + LogLevel level = 2; + string message = 3; +} + +enum ServerInfoType { + All = 0; + HardwareInfo = 1; + SystemInfo = 2; + LoadInfo = 3; +} + +message ServerInfoRequest { + ServerInfoType tp = 1; +} + +message ServerInfoPair { + string key = 1; + string value = 2; +} + +message ServerInfoItem { + // cpu, memory, disk, network ... + string tp = 1; + // eg. 
network: lo1/eth0, cpu: core1/core2, disk: sda1/sda2 + string name = 2; + // all key-value pairs for the specified item, e.g.: + // ServerInfoItem { + // tp = "network" + // name = "eth0" + // pairs = [ + // ServerInfoPair { key = "readbytes", value = "4k"}, + // ServerInfoPair { key = "writebytes", value = "1k"}, + // ] + // } + repeated ServerInfoPair pairs = 3; +} + +message ServerInfoResponse { + repeated ServerInfoItem items = 1; +} diff --git a/tikv-client-proto/proto/encryptionpb.proto b/tikv-client-proto/proto/encryptionpb.proto new file mode 100644 index 00000000..880bc5d0 --- /dev/null +++ b/tikv-client-proto/proto/encryptionpb.proto @@ -0,0 +1,110 @@ +syntax = "proto3"; +package encryptionpb; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// General encryption metadata for any data type. +message EncryptionMeta { + // ID of the key used to encrypt the data. + uint64 key_id = 1; + // Initialization vector (IV) of the data. + bytes iv = 2; +} + +// Information about an encrypted file. +message FileInfo { + // ID of the key used to encrypt the file. + uint64 key_id = 1; + // Initialization vector (IV) of the file. + bytes iv = 2; + // Method of the encryption algorithm used to encrypt the file. + EncryptionMethod method = 3; +} + +message FileDictionary { + // A map of file name to file info. + map<string, FileInfo> files = 1; +} + +enum EncryptionMethod { + UNKNOWN = 0; + PLAINTEXT = 1; + AES128_CTR = 2; + AES192_CTR = 3; + AES256_CTR = 4; +} + +// The key used to encrypt the user data. +message DataKey { + // A sequence of secret bytes used to encrypt data. + bytes key = 1; + // Method of the encryption algorithm used to encrypt data. + EncryptionMethod method = 2; + // Creation time of the key. + uint64 creation_time = 3; + // A flag indicating whether the key has ever been exposed. + bool was_exposed = 4; +} + +message KeyDictionary { + // A map of key ID to data key. + map<uint64, DataKey> keys = 1; + // ID of a key currently in use. + uint64 current_key_id = 2; +} + +// Master key config. +message MasterKey { + oneof backend { + MasterKeyPlaintext plaintext = 1; + MasterKeyFile file = 2; + MasterKeyKms kms = 3; + } +} + +// MasterKeyPlaintext indicates content is stored as plaintext. +message MasterKeyPlaintext {} + +// MasterKeyFile is a master key backed by a file containing the encryption key in human-readable +// hex format. +message MasterKeyFile { + // Local file path. + string path = 1; +} + +// MasterKeyKms is a master key backed by a KMS service that manages the encryption key, +// and provides an API to encrypt and decrypt a data key, which is used to encrypt the content. +message MasterKeyKms { + // KMS vendor. + string vendor = 1; + // KMS key id. + string key_id = 2; + // KMS region. + string region = 3; + // KMS endpoint. Normally not needed. + string endpoint = 4; +} + +message EncryptedContent { + // Metadata of the encrypted content, + // e.g. IV, method and KMS key ID. + // It is preferred to define new fields for extra metadata rather than using this metadata map. + map<string, bytes> metadata = 1; + // Encrypted content. + bytes content = 2; + // Master key used to encrypt the content. + MasterKey master_key = 3; + // Initialization vector (IV) used. + bytes iv = 4; + // Encrypted data key generated by KMS and used to actually encrypt data. + // Valid only when KMS is used.
+ bytes ciphertext_key = 5; +} diff --git a/tikv-client-proto/proto/enginepb.proto b/tikv-client-proto/proto/enginepb.proto new file mode 100644 index 00000000..c0eb17c7 --- /dev/null +++ b/tikv-client-proto/proto/enginepb.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; +package enginepb; + +import "metapb.proto"; +import "raft_cmdpb.proto"; +import "raft_serverpb.proto"; + +message CommandRequestHeader { + uint64 region_id = 1; + uint64 index = 2; + uint64 term = 3; + + // Flush in-memory data to disk. + bool sync_log = 4; + // Destroy the region. + bool destroy = 5; + + // Additional information for the request. + bytes context = 6; +} + +message CommandRequest { + CommandRequestHeader header = 1; + + // We don't enclose normal requests and administrator request + // at same time. + + // kv put / delete + repeated raft_cmdpb.Request requests = 2; + + // region metadata manipulation command. + raft_cmdpb.AdminRequest admin_request = 3; + // region metadata manipulation result. + raft_cmdpb.AdminResponse admin_response = 4; +} + +message CommandRequestBatch { + repeated CommandRequest requests = 1; +} + +message CommandResponseHeader { + uint64 region_id = 1; + // Region is destroyed. + bool destroyed = 2; +} + +message CommandResponse { + CommandResponseHeader header = 1; + + raft_serverpb.RaftApplyState apply_state = 2; + uint64 applied_term = 3; +} + +message CommandResponseBatch { + repeated CommandResponse responses = 1; +} + +message SnapshotState { + metapb.Region region = 1; + metapb.Peer peer = 2; + raft_serverpb.RaftApplyState apply_state = 3; +} + +message SnapshotData { + string cf = 1; + uint32 checksum = 2; + repeated raft_serverpb.KeyValue data = 3; +} + +message SnapshotRequest { + oneof chunk { + // The first message for snapshots. + // It contains the latest region information after applied snapshot. + SnapshotState state = 1; + + // Following messages are always data. + SnapshotData data = 2; + } +} + +message SnapshotDone {} + +service Engine { + rpc ApplyCommandBatch(stream CommandRequestBatch) returns (stream CommandResponseBatch) {} + rpc ApplySnapshot(stream SnapshotRequest) returns (SnapshotDone) {} +} diff --git a/tikv-client-proto/proto/errorpb.proto b/tikv-client-proto/proto/errorpb.proto new file mode 100644 index 00000000..47c606a9 --- /dev/null +++ b/tikv-client-proto/proto/errorpb.proto @@ -0,0 +1,106 @@ +syntax = "proto3"; +package errorpb; + +import "metapb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// NotLeader is the error variant that tells a request be handle by raft leader +// is sent to raft follower or learner. +message NotLeader { + // The requested region ID + uint64 region_id = 1; + // Region leader of the requested region + metapb.Peer leader = 2; +} + +// StoreNotMatch is the error variant that tells the request is sent to wrong store. +// (i.e. inconsistency of the store ID that request shows and the real store ID of this server.) +message StoreNotMatch { + // Store id in request + uint64 request_store_id = 1; + // Actual store id + uint64 actual_store_id = 2; +} + +// RegionNotFound is the error variant that tells there isn't any region in this TiKV +// matches the requested region ID. 
+message RegionNotFound { + // The requested region ID + uint64 region_id = 1; +} + +// KeyNotInRegion is the error variant that tells the key the request requires isn't present in +// this region. +message KeyNotInRegion { + // The requested key + bytes key = 1; + // The requested region ID + uint64 region_id = 2; + // Start key of the requested region + bytes start_key = 3; + // Snd key of the requested region + bytes end_key = 4; +} + +// EpochNotMatch is the error variant that tells a region has been updated. +// (e.g. by splitting / merging, or raft Confchange.) +// Hence, a command is based on a stale version of a region. +message EpochNotMatch { + // Available regions that may be siblings of the requested one. + repeated metapb.Region current_regions = 1; +} + +// ServerIsBusy is the error variant that tells the server is too busy to response. +message ServerIsBusy { + string reason = 1; + // The suggested backoff time + uint64 backoff_ms = 2; +} + +// StaleCommand is the error variant that tells the command is stale, that is, +// the current request term is lower than current raft term. +// This can be retried at most time. +message StaleCommand { +} + +// RaftEntryTooLarge is the error variant that tells the request is too large to be serialized to a +// reasonable small raft entry. +// (i.e. greater than the configured value `raft_entry_max_size` in `raftstore`) +message RaftEntryTooLarge { + // The requested region ID + uint64 region_id = 1; + // Size of the raft entry + uint64 entry_size = 2; +} + +// MaxTimestampNotSynced is the error variant that tells the peer has just become a leader and +// updating the max timestamp in the concurrency manager from PD TSO is ongoing. In this case, +// the prewrite of an async commit transaction cannot succeed. The client can backoff and +// resend the request. +message MaxTimestampNotSynced { +} + +// Error wraps all region errors, indicates an error encountered by a request. +message Error { + reserved "stale_epoch"; + + // The error message + string message = 1; + NotLeader not_leader = 2; + RegionNotFound region_not_found = 3; + KeyNotInRegion key_not_in_region = 4; + EpochNotMatch epoch_not_match = 5; + ServerIsBusy server_is_busy = 6; + StaleCommand stale_command = 7; + StoreNotMatch store_not_match = 8; + RaftEntryTooLarge raft_entry_too_large = 9; + MaxTimestampNotSynced max_timestamp_not_synced = 10; +} diff --git a/tikv-client-proto/proto/import_kvpb.proto b/tikv-client-proto/proto/import_kvpb.proto new file mode 100644 index 00000000..fc72adf1 --- /dev/null +++ b/tikv-client-proto/proto/import_kvpb.proto @@ -0,0 +1,168 @@ +syntax = "proto3"; + +package import_kvpb; + +import "import_sstpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// ImportKV provides a service to import key-value pairs to TiKV. +// +// In order to import key-value pairs to TiKV, the user should: +// 1. Open an engine identified by an UUID. +// 2. Open write streams to write key-value batches to the opened engine. +// Different streams/clients can write to the same engine concurrently. +// 3. Close the engine after all write batches have been finished. An +// engine can only be closed when all write streams are closed. An +// engine can only be closed once, and it can not be opened again +// once it is closed. +// 4. 
Import the data in the engine to the target cluster. Note that +// the import process is not atomic, it requires the data to be +// idempotent on retry. An engine can only be imported after it is +// closed. An engine can be imported multiple times, but can not be +// imported concurrently. +// 5. Clean up the engine after it has been imported. Delete all data +// in the engine. An engine can not be cleaned up when it is +// writing or importing. +service ImportKV { + // Switch the target cluster to normal/import mode. + rpc SwitchMode(SwitchModeRequest) returns (SwitchModeResponse) {} + // Open an engine. + rpc OpenEngine(OpenEngineRequest) returns (OpenEngineResponse) {} + // Open a write stream to the engine. + rpc WriteEngine(stream WriteEngineRequest) returns (WriteEngineResponse) {} + // Write to engine, single message version + rpc WriteEngineV3(WriteEngineV3Request) returns (WriteEngineResponse) {} + // Close the engine. + rpc CloseEngine(CloseEngineRequest) returns (CloseEngineResponse) {} + // Import the engine to the target cluster. + rpc ImportEngine(ImportEngineRequest) returns (ImportEngineResponse) {} + // Clean up the engine. + rpc CleanupEngine(CleanupEngineRequest) returns (CleanupEngineResponse) {} + // Compact the target cluster for better performance. + rpc CompactCluster(CompactClusterRequest) returns (CompactClusterResponse) {} + // Get current version and commit hash + rpc GetVersion(GetVersionRequest) returns (GetVersionResponse) {} + // Get importer metrics + rpc GetMetrics(GetMetricsRequest) returns (GetMetricsResponse) {} +} + +message SwitchModeRequest { + string pd_addr = 1; + import_sstpb.SwitchModeRequest request = 2; +} + +message SwitchModeResponse { +} + +message OpenEngineRequest { + bytes uuid = 1; + bytes key_prefix = 2; +} + +message OpenEngineResponse { +} + +message WriteHead { + bytes uuid = 1; +} + +message Mutation { + enum OP { + Put = 0; + } + OP op = 1; + bytes key = 2; + bytes value = 3; +} + +message WriteBatch { + uint64 commit_ts = 1; + repeated Mutation mutations = 2; +} + +message WriteEngineRequest { + oneof chunk { + WriteHead head = 1; + WriteBatch batch = 2; + } +} + +message KVPair { + bytes key = 1; + bytes value = 2; +} + +message WriteEngineV3Request { + bytes uuid = 1; + uint64 commit_ts = 2; + repeated KVPair pairs = 3; +} + +message WriteEngineResponse { + Error error = 1; +} + +message CloseEngineRequest { + bytes uuid = 1; +} + +message CloseEngineResponse { + Error error = 1; +} + +message ImportEngineRequest { + bytes uuid = 1; + string pd_addr = 2; +} + +message ImportEngineResponse { +} + +message CleanupEngineRequest { + bytes uuid = 1; +} + +message CleanupEngineResponse { +} + +message CompactClusterRequest { + string pd_addr = 1; + import_sstpb.CompactRequest request = 2; +} + +message CompactClusterResponse { +} + +message GetVersionRequest { +} + +message GetVersionResponse { + string version = 1; + string commit = 2; +} + +message GetMetricsRequest { +} + +message GetMetricsResponse { + string prometheus = 1; +} + +message Error { + message EngineNotFound { + bytes uuid = 1; + } + // This can happen if the client hasn't opened the engine, or the server + // restarts while the client is writing or closing. An unclosed engine will + // be removed on server restart, so the client should not continue but + // restart the previous job in that case. 
+ EngineNotFound engine_not_found = 1; +} diff --git a/tikv-client-proto/proto/import_sstpb.proto b/tikv-client-proto/proto/import_sstpb.proto new file mode 100644 index 00000000..78245287 --- /dev/null +++ b/tikv-client-proto/proto/import_sstpb.proto @@ -0,0 +1,210 @@ +syntax = "proto3"; + +package import_sstpb; + +import "metapb.proto"; +import "errorpb.proto"; +import "kvrpcpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; +import "backup.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// ImportSST provides a service to import a generated SST file to a region in TiKV. +// +// In order to import an SST file to a region, the user should: +// 1. Retrieve the meta of the region according to the SST file's range. +// 2. Upload the SST file to the servers where the region's peers locate in. +// 3. Issue an ingest request to the region's leader with the SST file's metadata. +// +// It's the user's responsibility to make sure that the SST file is uploaded to +// the servers where the region's peers locate in, before issue the ingest +// request to the region's leader. However, the region can be scheduled (so the +// location of the region's peers will be changed) or split/merged (so the range +// of the region will be changed), after the SST file is uploaded, but before +// the SST file is ingested. So, the region's epoch is provided in the SST +// file's metadata, to guarantee that the region's epoch must be the same +// between the SST file is uploaded and ingested later. +service ImportSST { + // Switch to normal/import mode. + rpc SwitchMode(SwitchModeRequest) returns (SwitchModeResponse) {} + // Upload an SST file to a server. + rpc Upload(stream UploadRequest) returns (UploadResponse) {} + // Ingest an uploaded SST file to a region. + rpc Ingest(IngestRequest) returns (IngestResponse) {} + // Compact the specific range for better performance. + rpc Compact(CompactRequest) returns (CompactResponse) {} + + rpc SetDownloadSpeedLimit(SetDownloadSpeedLimitRequest) returns (SetDownloadSpeedLimitResponse) {} + // Download an SST file from an external storage, and performs key-rewrite + // after downloading. + rpc Download(DownloadRequest) returns (DownloadResponse) {} + + // Open a write stream to generate sst files + rpc Write(stream WriteRequest) returns (WriteResponse) {} +} + +enum SwitchMode { + Normal = 0; + Import = 1; +} + +message SwitchModeRequest { + SwitchMode mode = 1; +} + +message SwitchModeResponse { +} + +message Range { + bytes start = 1; + bytes end = 2; +} + +message SSTMeta { + bytes uuid = 1; + Range range = 2; + uint32 crc32 = 3; + uint64 length = 4; + string cf_name = 5; + uint64 region_id = 6; + metapb.RegionEpoch region_epoch = 7; + bool end_key_exclusive = 8; +} + +// A rewrite rule is applied on the *encoded* keys (the internal storage +// representation). +message RewriteRule { + bytes old_key_prefix = 1; + bytes new_key_prefix = 2; + uint64 new_timestamp = 3; +} + +message UploadRequest { + oneof chunk { + SSTMeta meta = 1; + bytes data = 2; + } +} + +message UploadResponse { +} + +message IngestRequest { + kvrpcpb.Context context = 1; + SSTMeta sst = 2; +} + +message IngestResponse { + errorpb.Error error = 1; +} + +message CompactRequest { + // Compact files in the range and above the output level. + // Compact all files if the range is not specified. 
+ // Compact all files to the bottommost level if the output level is -1. + Range range = 1; + int32 output_level = 2; +} + +message CompactResponse { +} + +message DownloadRequest { + // The SST meta used to identify the downloaded file. + // Must be the same among all nodes in the same Raft group. + // Note: the "crc32" and "cf_name" fields are ignored in this request, + // and the "range" field represents the closed key range after rewrite + // (as origin keys in encoded representation). + SSTMeta sst = 2 [(gogoproto.nullable) = false]; + + // The url field is deprecated, use storage_backend instead + reserved 8; reserved "url"; + + // The file name of the SST file. + string name = 9; + + // Performs a key prefix rewrite after downloading the SST file. + // All keys in the SST will be rewritten as: + // + // new_key = new_key_prefix + old_key[len(old_key_prefix)..] + // + // When used for TiDB, rewriting the prefix changes the table ID. Please + // note that key-rewrite is applied on the origin keys in encoded + // representation (the SST itself should still use data keys in encoded + // representation). + // + // You need to ensure that the keys before and after rewriting are in the + // same order, otherwise the RPC request will fail. + RewriteRule rewrite_rule = 13 [(gogoproto.nullable) = false]; + + backup.StorageBackend storage_backend = 14; + + bool is_raw_kv = 15; +} + +// For now it is just used for distinguishing the error of the request with the error +// of gRPC, add more concrete types if it is necessary later. +message Error { + string message = 1; +} + +message DownloadResponse { + // The actual key range (after rewrite) of the downloaded SST. The range is + // inclusive in both ends. + Range range = 1 [(gogoproto.nullable) = false]; + + // Whether the SST is empty. An empty SST is prohibited in TiKV, do not + // ingest if this field is true. + // (Deprecated, should be replaced by checking `length == 0` in the future) + bool is_empty = 2; + + Error error = 3; + + // The CRC32 checksum of the rewritten SST file (implementation can return + // zero, indicating the CRC32 was not calculated). + uint32 crc32 = 4; + // The actual length of the rewritten SST file. + uint64 length = 5; +} + +message SetDownloadSpeedLimitRequest { + // The download speed limit (bytes/second). Set to 0 for unlimited speed. + uint64 speed_limit = 1; +} + +message SetDownloadSpeedLimitResponse { +} + +message Pair { + bytes key = 1; + bytes value = 2; + enum OP { + Put = 0; + Delete = 1; + } + OP op = 3; +} + +message WriteBatch { + uint64 commit_ts = 1; + repeated Pair pairs = 2; +} + +message WriteRequest { + oneof chunk { + SSTMeta meta = 1; + WriteBatch batch = 2; + } +} + +message WriteResponse { + Error error = 1; + repeated SSTMeta metas = 2; +} diff --git a/tikv-client-proto/proto/kvrpcpb.proto b/tikv-client-proto/proto/kvrpcpb.proto new file mode 100644 index 00000000..c5cafd44 --- /dev/null +++ b/tikv-client-proto/proto/kvrpcpb.proto @@ -0,0 +1,931 @@ +syntax = "proto3"; +package kvrpcpb; + +import "metapb.proto"; +import "errorpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// This proto file defines requests, responses, and helper messages for KV and raw +// APIs of TiKV (see tikvpb.proto). + +// Transactional commands. 
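Since the preamble above describes kvrpcpb as the request/response surface for TiKV's KV and raw APIs, a minimal, illustrative sketch follows of how a caller might construct one of the transactional messages defined just below through the new tikv-client-proto crate. It is not part of this diff; the prost-generated field layout and module path are assumptions based on the `prost-codec` feature used elsewhere in this change.

```rust
use tikv_client_proto::kvrpcpb;

// Build a transactional point-get: look up `key` at the transaction's
// start timestamp (`version`), routed via the region metadata in `ctx`.
// Field names mirror kvrpcpb.GetRequest as defined below; the struct
// shape is assumed from prost codegen.
fn build_txn_get(ctx: kvrpcpb::Context, key: Vec<u8>, start_ts: u64) -> kvrpcpb::GetRequest {
    kvrpcpb::GetRequest {
        context: Some(ctx),
        key,
        version: start_ts,
    }
}
```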
+ +// A transactional get command. Lookup a value for `key` in the transaction with +// starting timestamp = `version`. +message GetRequest { + Context context = 1; + bytes key = 2; + uint64 version = 3; +} + +message GetResponse { + // A region error indicates that the request was sent to the wrong TiKV node + // (or other, similar errors). + errorpb.Error region_error = 1; + // A value could not be retrieved due to the state of the database for the requested key. + KeyError error = 2; + // A successful result. + bytes value = 3; + // True if the key does not exist in the database. + bool not_found = 4; + // Set if ctx.scan_detail = true or meet slow query. + ScanDetailV2 scan_detail_v2 = 5; +} + +// Scan fetches values for a range of keys; it is part of the transaction with +// starting timestamp = `version`. +message ScanRequest { + Context context = 1; + bytes start_key = 2; + // The maximum number of results to return. + uint32 limit = 3; + uint64 version = 4; + // Return only the keys found by scanning, not their values. + bool key_only = 5; + bool reverse = 6; + // For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key; + // and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key. + bytes end_key = 7; + // If sample_step > 0, skips 'sample_step - 1' number of keys after each returned key. + // locks are not checked. + uint32 sample_step = 8; +} + +message ScanResponse { + errorpb.Error region_error = 1; + // Each KvPair may contain a key error. + repeated KvPair pairs = 2; +} + +// A prewrite is the first phase of writing to TiKV. It contains all data to be written in a transaction. +// TiKV will write the data in a preliminary state. Data cannot be read until it has been committed. +// The client should only commit a transaction once all prewrites succeed. +message PrewriteRequest { + Context context = 1; + // The data to be written to the database. + repeated Mutation mutations = 2; + // The client picks one key to be primary (unrelated to the primary key concept in SQL). This + // key's lock is the source of truth for the state of a transaction. All other locks due to a + // transaction will point to the primary lock. + bytes primary_lock = 3; + // Identifies the transaction being written. + uint64 start_version = 4; + uint64 lock_ttl = 5; + // TiKV can skip some checks, used for speeding up data migration. + bool skip_constraint_check = 6; + // For pessimistic transaction, some mutations don't need to be locked, for example, non-unique index key. + repeated bool is_pessimistic_lock = 7; + // How many keys this transaction involves in this region. + uint64 txn_size = 8; + // For pessimistic transactions only; used to check if a conflict lock is already committed. + uint64 for_update_ts = 9; + // If min_commit_ts > 0, this is a large transaction request, the final commit_ts + // will be inferred from `min_commit_ts`. + uint64 min_commit_ts = 10; + // When async commit is enabled, `secondaries` should be set as the key list of all secondary + // locks if the request prewrites the primary lock. + bool use_async_commit = 11; + repeated bytes secondaries = 12; + // When the transaction involves only one region, it's possible to commit the transaction + // directly with 1PC protocol. + bool try_one_pc = 13; + // The max commit ts is reserved for limiting the commit ts of 1PC or async commit, which can be used to avoid + // inconsistency with schema change. 
+ uint64 max_commit_ts = 14; +} + +message PrewriteResponse { + errorpb.Error region_error = 1; + repeated KeyError errors = 2; + // 0 if the min_commit_ts is not ready or any other reason that async + // commit cannot proceed. The client can then fallback to normal way to + // continue committing the transaction if prewrite are all finished. + uint64 min_commit_ts = 3; + // When the transaction is successfully committed with 1PC protocol, this field will be set to + // the commit ts of the transaction. Otherwise, if TiKV failed to commit it with 1PC or the + // transaction is not 1PC, the value will be 0. + uint64 one_pc_commit_ts = 4; +} + +// Lock a set of keys to prepare to write to them. +message PessimisticLockRequest { + Context context = 1; + // In this case every `Op` of the mutations must be `PessimisticLock`. + repeated Mutation mutations = 2; + bytes primary_lock = 3; + uint64 start_version = 4; + uint64 lock_ttl = 5; + // Each locking command in a pessimistic transaction has its own timestamp. If locking fails, then + // the corresponding SQL statement can be retried with a later timestamp, TiDB does not need to + // retry the whole transaction. The name comes from the `SELECT ... FOR UPDATE` SQL statement which + // is a locking read. Each `SELECT ... FOR UPDATE` in a transaction will be assigned its own + // timestamp. + uint64 for_update_ts = 6; + // If the request is the first lock request, we don't need to detect deadlock. + bool is_first_lock = 7; + // Time to wait for lock released in milliseconds when encountering locks. + // 0 means using default timeout in TiKV. Negative means no wait. + int64 wait_timeout = 8; + // If it is true, TiKV will acquire the pessimistic lock regardless of write conflict + // and return the latest value. It's only supported for single mutation. + bool force = 9; + // If it is true, TiKV will return values of the keys if no error, so TiDB can cache the values for + // later read in the same transaction. + // When 'force' is set to true, this field is ignored. + bool return_values = 10; + // If min_commit_ts > 0, this is large transaction proto, the final commit_ts + // would be infered from min_commit_ts. + uint64 min_commit_ts = 11; +} + +message PessimisticLockResponse { + errorpb.Error region_error = 1; + repeated KeyError errors = 2; + // It carries the latest value and its commit ts if force in PessimisticLockRequest is true. + uint64 commit_ts = 3; + bytes value = 4; + // The values is set if 'return_values' is true in the request and no error. + // If 'force' is true, this field is not used. + repeated bytes values = 5; +} + +// Unlock keys locked using `PessimisticLockRequest`. +message PessimisticRollbackRequest { + Context context = 1; + uint64 start_version = 2; + uint64 for_update_ts = 3; + repeated bytes keys = 4; +} + +message PessimisticRollbackResponse { + errorpb.Error region_error = 1; + repeated KeyError errors = 2; +} + +// Used to update the lock_ttl of a psessimistic and/or large transaction to prevent it from been killed. +message TxnHeartBeatRequest { + Context context = 1; + // The key of the lock to update. + bytes primary_lock = 2; + // Start timestamp of the large transaction. + uint64 start_version = 3; + // The new TTL the sender would like. + uint64 advise_lock_ttl = 4; +} + +message TxnHeartBeatResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // The TTL actually set on the requested lock. + uint64 lock_ttl = 3; +} + +// CheckTxnStatusRequest checks the status of a transaction. 
+// If the transaction is rollbacked/committed, return that result. +// If the TTL of the transaction is exhausted, abort that transaction and inform the caller. +// Otherwise, returns the TTL information for the transaction. +// CheckTxnStatusRequest may also push forward the minCommitTS of a large transaction. +message CheckTxnStatusRequest { + Context context = 1; + // Primary key and lock ts together to locate the primary lock of a transaction. + bytes primary_key = 2; + // Starting timestamp of the transaction being checked. + uint64 lock_ts = 3; + // The start timestamp of the transaction which this request is part of. + uint64 caller_start_ts = 4; + // The client must specify the current time to TiKV using this timestamp. It is used to check TTL + // timeouts. It may be inaccurate. + uint64 current_ts = 5; + // If true, then TiKV will leave a rollback tombstone in the write CF for `primary_key`, even if + // that key is not locked. + bool rollback_if_not_exist = 6; +} + +message CheckTxnStatusResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // Three kinds of transaction status: + // locked: lock_ttl > 0 + // committed: commit_version > 0 + // rollbacked: lock_ttl = 0 && commit_version = 0 + uint64 lock_ttl = 3; + uint64 commit_version = 4; + // The action performed by TiKV (and why if the action is to rollback). + Action action = 5; + LockInfo lock_info = 6; +} + +// Part of the async commit protocol, checks for locks on all supplied keys. If a lock is missing, +// does not have a successful status, or belongs to another transaction, TiKV will leave a rollback +// tombstone for that key. +message CheckSecondaryLocksRequest { + Context context = 1; + repeated bytes keys = 2; + // Identifies the transaction we are investigating. + uint64 start_version = 3; +} + +message CheckSecondaryLocksResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // For each key in `keys` in `CheckSecondaryLocks`, there will be a lock in + // this list if there is a lock present and belonging to the correct transaction, + // nil otherwise. + repeated LockInfo locks = 3; + // If any of the locks have been committed, this is the commit ts used. If no + // locks have been committed, it will be zero. + uint64 commit_ts = 4; +} + +// The second phase of writing to TiKV. If there are no errors or conflicts, then this request +// commits a transaction so that its data can be read by other transactions. +message CommitRequest { + reserved 5; + reserved "binlog"; + Context context = 1; + // Identifies the transaction. + uint64 start_version = 2; + // All keys in the transaction (to be committed). + repeated bytes keys = 3; + // Timestamp for the end of the transaction. Must be greater than `start_version`. + uint64 commit_version = 4; +} + +message CommitResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // If the commit ts is derived from min_commit_ts, this field should be set. + uint64 commit_version = 3; +} + +// Not yet implemented. +message ImportRequest { + repeated Mutation mutations = 1; + uint64 commit_version = 2; +} + +message ImportResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +// Cleanup a key by possibly unlocking it. +// From 4.0 onwards, this message is no longer used. +message CleanupRequest { + Context context = 1; + bytes key = 2; + uint64 start_version = 3; + // The current timestamp, used in combination with a lock's TTL to determine + // if the lock has expired. 
If `current_ts == 0`, then the key will be unlocked + // irrespective of its TTL. + uint64 current_ts = 4; +} + +message CleanupResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // Set if the key is already committed. + uint64 commit_version = 3; +} + +// Similar to a `Get` request. +message BatchGetRequest { + Context context = 1; + repeated bytes keys = 2; + uint64 version = 3; +} + +message BatchGetResponse { + errorpb.Error region_error = 1; + repeated KvPair pairs = 2; + // Set if ctx.scan_detail = true or meet slow query. + ScanDetailV2 scan_detail_v2 = 3; +} + +// Rollback a prewritten transaction. This will remove the preliminary data from the database, +// unlock locks, and leave a rollback tombstone. +message BatchRollbackRequest { + Context context = 1; + // Identify the transaction to be rolled back. + uint64 start_version = 2; + // The keys to rollback. + repeated bytes keys = 3; +} + +message BatchRollbackResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// Scan the database for locks. Used at the start of the GC process to find all +// old locks. +message ScanLockRequest { + Context context = 1; + // Returns all locks with a start timestamp before `max_version`. + uint64 max_version = 2; + // Start scanning from this key. + bytes start_key = 3; + // The maximum number of locks to return. + uint32 limit = 4; +} + +message ScanLockResponse { + errorpb.Error region_error = 1; + KeyError error = 2; + // Info on all locks found by the scan. + repeated LockInfo locks = 3; +} + +// For all keys locked by the transaction identified by `start_version`, either +// commit or rollback the transaction and unlock the key. +message ResolveLockRequest { + Context context = 1; + uint64 start_version = 2; + // `commit_version == 0` means the transaction was rolled back. + // `commit_version > 0` means the transaction was committed at the given timestamp. + uint64 commit_version = 3; + repeated TxnInfo txn_infos = 4; + // Only resolve specified keys. + repeated bytes keys = 5; +} + +message ResolveLockResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// Request TiKV to garbage collect all non-current data older than `safe_point`. +message GCRequest { + Context context = 1; + uint64 safe_point = 2; +} + +message GCResponse { + errorpb.Error region_error = 1; + KeyError error = 2; +} + +// Delete a range of data from TiKV. +// This message should not be used. +message DeleteRangeRequest { + Context context = 1; + bytes start_key = 2; + bytes end_key = 3; + // If true, the data will not be immediately deleted, but the operation will + // still be replicated via Raft. This is used to notify TiKV that the data + // will be deleted using `unsafe_destroy_range` soon. + bool notify_only = 4; +} + +message DeleteRangeResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +// Raw commands. 
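The prewrite/commit comments above spell out TiKV's two-phase commit: every mutation is first prewritten under a chosen primary lock, and the commit message is sent only after all prewrites succeed, with `commit_version > start_version`. Below is a hedged sketch of that message flow using the generated kvrpcpb types; the helper functions, the TTL value, and the prost-style struct construction (proto enums carried as `i32`) are illustrative assumptions, not APIs introduced by this diff.

```rust
use tikv_client_proto::kvrpcpb::{self, Mutation, Op};

// Phase 1: prewrite the mutation, designating one key as the primary lock.
// The primary lock is the source of truth for the transaction's state.
fn build_prewrite(
    ctx: kvrpcpb::Context,
    key: Vec<u8>,
    value: Vec<u8>,
    start_ts: u64,
) -> kvrpcpb::PrewriteRequest {
    kvrpcpb::PrewriteRequest {
        context: Some(ctx),
        mutations: vec![Mutation {
            op: Op::Put as i32, // prost represents proto enum fields as i32
            key: key.clone(),
            value,
            ..Default::default()
        }],
        primary_lock: key,
        start_version: start_ts,
        lock_ttl: 3000, // milliseconds; arbitrary illustrative value
        ..Default::default()
    }
}

// Phase 2: commit is sent only after every prewrite in the transaction succeeded.
fn build_commit(
    ctx: kvrpcpb::Context,
    keys: Vec<Vec<u8>>,
    start_ts: u64,
    commit_ts: u64,
) -> kvrpcpb::CommitRequest {
    kvrpcpb::CommitRequest {
        context: Some(ctx),
        start_version: start_ts,
        keys,
        commit_version: commit_ts, // must be greater than start_version
        ..Default::default()
    }
}
```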
+ +message RawGetRequest { + Context context = 1; + bytes key = 2; + string cf = 3; +} + +message RawGetResponse { + errorpb.Error region_error = 1; + string error = 2; + bytes value = 3; + bool not_found = 4; +} + +message RawBatchGetRequest { + Context context = 1; + repeated bytes keys = 2; + string cf = 3; +} + +message RawBatchGetResponse { + errorpb.Error region_error = 1; + repeated KvPair pairs = 2; +} + +message RawPutRequest { + Context context = 1; + bytes key = 2; + bytes value = 3; + string cf = 4; +} + +message RawPutResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawBatchPutRequest { + Context context = 1; + repeated KvPair pairs = 2; + string cf = 3; +} + +message RawBatchPutResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawDeleteRequest { + Context context = 1; + bytes key = 2; + string cf = 3; +} + +message RawDeleteResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawBatchDeleteRequest { + Context context = 1; + repeated bytes keys = 2; + string cf = 3; +} + +message RawBatchDeleteResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawScanRequest { + Context context = 1; + bytes start_key = 2; + uint32 limit = 3; + bool key_only = 4; + string cf = 5; + bool reverse = 6; + // For compatibility, when scanning forward, the range to scan is [start_key, end_key), where start_key < end_key; + // and when scanning backward, it scans [end_key, start_key) in descending order, where end_key < start_key. + bytes end_key = 7; +} + +message RawScanResponse { + errorpb.Error region_error = 1; + repeated KvPair kvs = 2; +} + +message RawDeleteRangeRequest { + Context context = 1; + bytes start_key = 2; + bytes end_key = 3; + string cf = 4; +} + +message RawDeleteRangeResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RawBatchScanRequest { + Context context = 1; + repeated KeyRange ranges = 2; // scanning range + uint32 each_limit = 3; // max number of returning kv pairs for each scanning range + bool key_only = 4; + string cf = 5; + bool reverse = 6; +} + +message RawBatchScanResponse { + errorpb.Error region_error = 1; + repeated KvPair kvs = 2; +} + +// Store commands (sent to a whole TiKV cluster, rather than a certain region). + +message UnsafeDestroyRangeRequest { + Context context = 1; + bytes start_key = 2; + bytes end_key = 3; +} + +message UnsafeDestroyRangeResponse { + errorpb.Error region_error = 1; + string error = 2; +} + +message RegisterLockObserverRequest { + Context context = 1; + uint64 max_ts = 2; +} + +message RegisterLockObserverResponse { + string error = 1; +} + +message CheckLockObserverRequest { + Context context = 1; + uint64 max_ts = 2; +} + +message CheckLockObserverResponse { + string error = 1; + bool is_clean = 2; + repeated LockInfo locks = 3; +} + +message RemoveLockObserverRequest { + Context context = 1; + uint64 max_ts = 2; +} + +message RemoveLockObserverResponse { + string error = 1; +} + +message PhysicalScanLockRequest { + Context context = 1; + uint64 max_ts = 2; + bytes start_key = 3; + uint32 limit = 4; +} + +message PhysicalScanLockResponse { + string error = 1; + repeated LockInfo locks = 2; +} + +// Sent from PD to a TiKV node. +message SplitRegionRequest { + Context context = 1; + bytes split_key = 2 [deprecated=true]; + repeated bytes split_keys = 3; // when use it to do batch split, `split_key` should be empty. 
+} + +message SplitRegionResponse { + errorpb.Error region_error = 1; + metapb.Region left = 2 [deprecated=true]; // set when there are only 2 result regions. + metapb.Region right = 3 [deprecated=true]; // set when there are only 2 result regions. + repeated metapb.Region regions = 4; // include all result regions. +} + +// Sent from TiFlash to a TiKV node. +message ReadIndexRequest{ + Context context = 1; + + // TiKV checks the given range if there is any unapplied lock + // blocking the read request. + uint64 start_ts = 2; + repeated KeyRange ranges = 3; +} + +message ReadIndexResponse{ + errorpb.Error region_error = 1; + uint64 read_index = 2; + // If `locked` is set, this read request is blocked by a lock. + // The lock should be returned to the client. + kvrpcpb.LockInfo locked = 3; +} + +// VerKv commands + +enum VerOp { + VerPut = 0; + VerDel = 1; +} + +message VerMutation { + VerOp op = 1; + bytes key = 2; + bytes value = 3; +} + +message VerValue { + bytes value = 1; + uint64 version = 2; +} + +message VerError { + string error = 1; +} + +message VerKvPair { + VerError error = 1; + bytes key = 2; + VerValue value = 3; +} + +message VerGetRequest { + Context context = 1; + bytes key = 2; + uint64 start_version = 3; // start_version == 0 means without start version +} + +message VerGetResponse { + errorpb.Error region_error = 1; + VerError error = 2; + VerValue value = 3; + bool not_found = 4; +} + +message VerBatchGetRequest { + Context context = 1; + repeated bytes key = 2; + uint64 start_version = 3; +} + +message VerBatchGetResponse { + errorpb.Error region_error = 1; + repeated VerKvPair pairs = 2; +} + +message VerMutRequest { + Context context = 1; + VerMutation mut = 2; + uint64 version = 3; +} + +message VerMutResponse { + errorpb.Error region_error = 1; + VerError error = 2; +} + +message VerBatchMutRequest { + Context context = 1; + repeated VerMutation muts = 2; + uint64 version = 3; +} + +message VerBatchMutResponse { + errorpb.Error region_error = 1; + VerError error = 2; +} + +message VerScanRequest { + Context context = 1; + bytes start_key = 2; + bytes end_key = 3; + uint32 limit = 4; + bool key_only = 5; + bool reverse = 6; + uint64 start_version = 7; +} + +message VerScanResponse { + errorpb.Error region_error = 1; + repeated VerKvPair pairs = 2; +} + +message VerDeleteRangeRequest { + Context context = 1; + bytes start_key = 2; + bytes end_key = 3; +} + +message VerDeleteRangeResponse { + errorpb.Error region_error = 1; + VerError error = 2; +} + +// Commands for debugging transactions. + +message MvccGetByKeyRequest { + Context context = 1; + bytes key = 2; +} + +message MvccGetByKeyResponse { + errorpb.Error region_error = 1; + string error = 2; + MvccInfo info = 3; +} + +message MvccGetByStartTsRequest { + Context context = 1; + uint64 start_ts = 2; +} + +message MvccGetByStartTsResponse { + errorpb.Error region_error = 1; + string error = 2; + bytes key = 3; + MvccInfo info = 4; +} + +// Helper messages. + +// Miscellaneous metadata attached to most requests. +message Context { + reserved 4; + reserved "read_quorum"; + uint64 region_id = 1; + metapb.RegionEpoch region_epoch = 2; + metapb.Peer peer = 3; + uint64 term = 5; + CommandPri priority = 6; + IsolationLevel isolation_level = 7; + bool not_fill_cache = 8; + bool sync_log = 9; + // True means return handle time detail. + bool handle_time = 10; + // True means return scan cf's detail. 
+ bool scan_detail = 11; + bool replica_read = 12; + repeated uint64 resolved_locks = 13; + uint64 max_execution_duration_ms = 14; + + // After a region applies to `applied_index`, we can get a + // snapshot for the region even if the peer is a follower. + uint64 applied_index = 15; + // A hint for TiKV to schedule tasks more fairly. Query with same task ID + // may share same priority and resource quota. + uint64 task_id = 16; +} + +message LockInfo { + bytes primary_lock = 1; + uint64 lock_version = 2; + bytes key = 3; + uint64 lock_ttl = 4; + // How many keys this transaction involves in this region. + uint64 txn_size = 5; + Op lock_type = 6; + uint64 lock_for_update_ts = 7; + // Fields for transactions that are using Async Commit. + bool use_async_commit = 8; + uint64 min_commit_ts = 9; + repeated bytes secondaries = 10; +} + +message KeyError { + LockInfo locked = 1; // Client should backoff or cleanup the lock then retry. + string retryable = 2; // Client may restart the txn. e.g write conflict. + string abort = 3; // Client should abort the txn. + WriteConflict conflict = 4; // Write conflict is moved from retryable to here. + AlreadyExist already_exist = 5; // Key already exists + Deadlock deadlock = 6; // Deadlock is used in pessimistic transaction for single statement rollback. + CommitTsExpired commit_ts_expired = 7; // Commit ts is earlier than min commit ts of a transaction. + TxnNotFound txn_not_found = 8; // Txn not found when checking txn status. + CommitTsTooLarge commit_ts_too_large = 9; // Calculated commit TS exceeds the limit given by the user. +} + +message WriteConflict { + uint64 start_ts = 1; + uint64 conflict_ts = 2; + bytes key = 3; + bytes primary = 4; + uint64 conflict_commit_ts = 5; +} + +message AlreadyExist { + bytes key = 1; +} + +message Deadlock { + uint64 lock_ts = 1; + bytes lock_key = 2; + uint64 deadlock_key_hash = 3; +} + +message CommitTsExpired { + uint64 start_ts = 1; + uint64 attempted_commit_ts = 2; + bytes key = 3; + uint64 min_commit_ts = 4; +} + +message TxnNotFound { + uint64 start_ts = 1; + bytes primary_key = 2; +} + +message CommitTsTooLarge { + uint64 commit_ts = 1; // The calculated commit TS. +} + +enum CommandPri { + Normal = 0; // Normal is the default value. + Low = 1; + High = 2; +} + +enum IsolationLevel { + SI = 0; // SI = snapshot isolation + RC = 1; // RC = read committed +} + +message HandleTime { + // Time spent in queue. + int64 wait_ms = 1; + // Processing time, excluding wait time. 
+ int64 process_ms = 2; +} + +message ScanInfo { + int64 total = 1; + int64 processed = 2; +} + +message ScanDetail { + ScanInfo write = 1; + ScanInfo lock = 2; + ScanInfo data = 3; +} + +message ScanDetailV2 { + uint64 processed_versions = 1; + uint64 total_versions = 2; + uint64 rocksdb_delete_skipped_count = 3; + uint64 rocksdb_key_skipped_count = 4; + uint64 rocksdb_block_cache_hit_count = 5; + uint64 rocksdb_block_read_count = 6; + uint64 rocksdb_block_read_byte = 7; +} + +message ExecDetails { + HandleTime handle_time = 1;// set when ctx.handle_time = true or meet slow query + ScanDetail scan_detail = 2;// set when ctx.scan_detail = true or meet slow query + bool use_scan_detail_v2 = 3;// set when ctx.scan_detail = true or meet slow query + ScanDetailV2 scan_detail_v2 = 4;// set when ctx.scan_detail = true or meet slow query +} + +message KvPair { + KeyError error = 1; + bytes key = 2; + bytes value = 3; +} + +enum Op { + Put = 0; + Del = 1; + Lock = 2; + Rollback = 3; + // insert operation has a constraint that key should not exist before. + Insert = 4; + PessimisticLock = 5; + CheckNotExists = 6; +} + +enum Assertion { + None = 0; + Exist = 1; + NotExist = 2; +} + +message Mutation { + Op op = 1; + bytes key = 2; + bytes value = 3; + Assertion assertion = 4; +} + +message MvccWrite { + Op type = 1; + uint64 start_ts = 2; + uint64 commit_ts = 3; + bytes short_value = 4; +} + +message MvccValue { + uint64 start_ts = 1; + bytes value = 2; +} + +message MvccLock { + Op type = 1; + uint64 start_ts = 2; + bytes primary = 3; + bytes short_value = 4; +} + +message MvccInfo { + MvccLock lock = 1; + repeated MvccWrite writes = 2; + repeated MvccValue values = 3; +} + +message TxnInfo { + uint64 txn = 1; + uint64 status = 2; +} + +enum Action { + NoAction = 0; + TTLExpireRollback = 1; + LockNotExistRollback = 2; + MinCommitTSPushed = 3; +} + +message KeyRange { + bytes start_key = 1; + bytes end_key = 2; +} + +enum ExtraOp { + Noop = 0; + // ReadOldValue represents to output the previous value for delete/update operations. + ReadOldValue = 1; +} diff --git a/tikv-client-proto/proto/metapb.proto b/tikv-client-proto/proto/metapb.proto new file mode 100644 index 00000000..a26ba925 --- /dev/null +++ b/tikv-client-proto/proto/metapb.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; +package metapb; + +import "encryptionpb.proto"; +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +message Cluster { + uint64 id = 1; + // max peer count for a region. + // pd will do the auto-balance if region peer count mismatches. + uint32 max_peer_count = 2; + // more attributes...... +} + +enum StoreState { + Up = 0; + Offline = 1; + Tombstone = 2; +} + +// Case insensitive key/value for replica constraints. +message StoreLabel { + string key = 1; + string value = 2; +} + +message Store { + uint64 id = 1; + // Address to handle client requests (kv, cop, etc.) + string address = 2; + StoreState state = 3; + repeated StoreLabel labels = 4; + string version = 5; + // Address to handle peer requests (raft messages from other store). + // Empty means same as address. 
+ string peer_address = 6; + // Status address provides the HTTP service for external components + string status_address = 7; + string git_hash = 8; + // The start timestamp of the current store + int64 start_timestamp = 9; + string deploy_path = 10; + // The last heartbeat timestamp of the store. + int64 last_heartbeat = 11; +} + +message RegionEpoch { + // Conf change version, auto increment when add or remove peer + uint64 conf_ver = 1; + // Region version, auto increment when split or merge + uint64 version = 2; +} + +message Region { + uint64 id = 1; + // Region key range [start_key, end_key). + bytes start_key = 2; + bytes end_key = 3; + RegionEpoch region_epoch = 4; + repeated Peer peers = 5; + // Encryption metadata for start_key and end_key. encryption_meta.iv is IV for start_key. + // IV for end_key is calculated from (encryption_meta.iv + len(start_key)). + // The field is only used by PD and should be ignored otherwise. + // If encryption_meta is empty (i.e. nil), it means start_key and end_key are unencrypted. + encryptionpb.EncryptionMeta encryption_meta = 6; +} + +enum PeerRole { + // Voter -> Voter + Voter = 0; + // Learner/None -> Learner + Learner = 1; + // Learner/None -> Voter + IncomingVoter = 2; + // Voter -> Learner + DemotingVoter = 3; + // We forbid Voter -> None, it can introduce unavailability as discussed in + // etcd-io/etcd#7625 + // Learner -> None can be apply directly, doesn't need to be stored as + // joint state. +} + +message Peer { + uint64 id = 1; + uint64 store_id = 2; + PeerRole role = 3; +} diff --git a/tikv-client-proto/proto/mpp.proto b/tikv-client-proto/proto/mpp.proto new file mode 100644 index 00000000..c8a06dfa --- /dev/null +++ b/tikv-client-proto/proto/mpp.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; +package mpp; + +import "gogoproto/gogo.proto"; +import "coprocessor.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; + +option java_package = "org.tikv.kvproto"; + +// TaskMeta contains meta of a mpp plan, including query's ts and task address. +message TaskMeta { + uint64 start_ts = 1; // start ts of a query + int64 task_id = 2; // if task id is -1 , it indicates a tidb task. + int64 partition_id = 3; // Only used for hash partition + string address = 4; // target address of this task. +} + +// Dipsatch the task request to different tiflash servers. +message DispatchTaskRequest { + TaskMeta meta = 1; + bytes encoded_plan = 2; + int64 timeout = 3; + repeated coprocessor.RegionInfo regions = 4; + // If this task contains table scan, we still need their region info. + int64 schema_ver = 5; +} + +// Get response of DispatchTaskRequest. +message DispatchTaskResponse { + Error error = 1; +} + +// CancelTaskRequest closes the execution of a task. +message CancelTaskRequest { + TaskMeta meta = 1; + Error error = 2; +} + +message CancelTaskResponse { + Error error = 1; +} + +// build connection between different tasks. Data is sent by the tasks that are closer to the data sources. +message EstablishMPPConnectionRequest { + TaskMeta sender_meta = 1; // node closer to the source + TaskMeta receiver_meta = 2; // node closer to the tidb mpp gather. +} + +// Data packets wrap tipb.SelectResponse. 
+message MPPDataPacket { + bytes data = 1; + Error error = 2; +} + +message Error { + int32 code = 1; + string msg = 2; +} diff --git a/tikv-client-proto/proto/pdpb.proto b/tikv-client-proto/proto/pdpb.proto new file mode 100644 index 00000000..e305c549 --- /dev/null +++ b/tikv-client-proto/proto/pdpb.proto @@ -0,0 +1,632 @@ +syntax = "proto3"; +package pdpb; + +import "metapb.proto"; +import "eraftpb.proto"; +import "replication_modepb.proto"; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +service PD { + // GetMembers get the member list of this cluster. It does not require + // the cluster_id in request matchs the id of this cluster. + rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {} + + rpc Tso(stream TsoRequest) returns (stream TsoResponse) {} + + rpc Bootstrap(BootstrapRequest) returns (BootstrapResponse) {} + + rpc IsBootstrapped(IsBootstrappedRequest) returns (IsBootstrappedResponse) {} + + rpc AllocID(AllocIDRequest) returns (AllocIDResponse) {} + + rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {} + + rpc PutStore(PutStoreRequest) returns (PutStoreResponse) {} + + rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {} + + rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {} + + rpc RegionHeartbeat(stream RegionHeartbeatRequest) returns (stream RegionHeartbeatResponse) {} + + rpc GetRegion(GetRegionRequest) returns (GetRegionResponse) {} + + rpc GetPrevRegion(GetRegionRequest) returns (GetRegionResponse) {} + + rpc GetRegionByID(GetRegionByIDRequest) returns (GetRegionResponse) {} + + rpc ScanRegions(ScanRegionsRequest) returns (ScanRegionsResponse) {} + + rpc AskSplit(AskSplitRequest) returns (AskSplitResponse) { + // Use AskBatchSplit instead. + option deprecated = true; + } + + rpc ReportSplit(ReportSplitRequest) returns (ReportSplitResponse) { + // Use ResportBatchSplit instead. + option deprecated = true; + } + + rpc AskBatchSplit(AskBatchSplitRequest) returns (AskBatchSplitResponse) {} + + rpc ReportBatchSplit(ReportBatchSplitRequest) returns (ReportBatchSplitResponse) {} + + rpc GetClusterConfig(GetClusterConfigRequest) returns (GetClusterConfigResponse) {} + + rpc PutClusterConfig(PutClusterConfigRequest) returns (PutClusterConfigResponse) {} + + rpc ScatterRegion(ScatterRegionRequest) returns (ScatterRegionResponse) {} + + rpc GetGCSafePoint(GetGCSafePointRequest) returns (GetGCSafePointResponse) {} + + rpc UpdateGCSafePoint(UpdateGCSafePointRequest) returns (UpdateGCSafePointResponse) {} + + rpc UpdateServiceGCSafePoint(UpdateServiceGCSafePointRequest) returns (UpdateServiceGCSafePointResponse) {} + + rpc SyncRegions(stream SyncRegionRequest) returns (stream SyncRegionResponse) {} + + rpc GetOperator(GetOperatorRequest) returns (GetOperatorResponse) {} + + rpc SyncMaxTS(SyncMaxTSRequest) returns (SyncMaxTSResponse) {} +} + +message RequestHeader { + // cluster_id is the ID of the cluster which be sent to. + uint64 cluster_id = 1; + // sender_id is the ID of the sender server, also member ID or etcd ID. + uint64 sender_id = 2; +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. 
+ uint64 cluster_id = 1; + Error error = 2; +} + +enum ErrorType { + OK = 0; + UNKNOWN = 1; + NOT_BOOTSTRAPPED = 2; + STORE_TOMBSTONE = 3; + ALREADY_BOOTSTRAPPED = 4; + INCOMPATIBLE_VERSION = 5; + REGION_NOT_FOUND = 6; +} + +message Error { + ErrorType type = 1; + string message = 2; +} + +message TsoRequest { + RequestHeader header = 1; + + uint32 count = 2; + string dc_location = 3; +} + +message Timestamp { + int64 physical = 1; + int64 logical = 2; +} + +message TsoResponse { + ResponseHeader header = 1; + + uint32 count = 2; + Timestamp timestamp = 3; +} + +message BootstrapRequest { + RequestHeader header = 1; + + metapb.Store store = 2; + metapb.Region region = 3; +} + +message BootstrapResponse { + ResponseHeader header = 1; + replication_modepb.ReplicationStatus replication_status = 2; +} + +message IsBootstrappedRequest { + RequestHeader header = 1; +} + +message IsBootstrappedResponse { + ResponseHeader header = 1; + + bool bootstrapped = 2; +} + +message AllocIDRequest { + RequestHeader header = 1; +} + +message AllocIDResponse { + ResponseHeader header = 1; + + uint64 id = 2; +} + +message GetStoreRequest { + RequestHeader header = 1; + + uint64 store_id = 2; +} + +message GetStoreResponse { + ResponseHeader header = 1; + + metapb.Store store = 2; + StoreStats stats = 3; +} + +message PutStoreRequest { + RequestHeader header = 1; + + metapb.Store store = 2; +} + +message PutStoreResponse { + ResponseHeader header = 1; + replication_modepb.ReplicationStatus replication_status = 2; +} + +message GetAllStoresRequest { + RequestHeader header = 1; + // Do NOT return tombstone stores if set to true. + bool exclude_tombstone_stores = 2; +} + +message GetAllStoresResponse { + ResponseHeader header = 1; + + repeated metapb.Store stores = 2; +} + +message GetRegionRequest { + RequestHeader header = 1; + + bytes region_key = 2; +} + +message GetRegionResponse { + reserved 4; + + ResponseHeader header = 1; + + metapb.Region region = 2; + metapb.Peer leader = 3; + // Leader considers that these peers are down. + repeated PeerStats down_peers = 5; + // Pending peers are the peers that the leader can't consider as + // working followers. + repeated metapb.Peer pending_peers = 6; +} + +message GetRegionByIDRequest { + RequestHeader header = 1; + + uint64 region_id = 2; +} + +// Use GetRegionResponse as the response of GetRegionByIDRequest. + +message ScanRegionsRequest { + RequestHeader header = 1; + + bytes start_key = 2; + int32 limit = 3; // no limit when limit <= 0. + bytes end_key = 4; // end_key is +inf when it is empty. +} + +message Region { + metapb.Region region = 1; + metapb.Peer leader = 2; + // Leader considers that these peers are down. + repeated PeerStats down_peers = 3; + // Pending peers are the peers that the leader can't consider as + // working followers. + repeated metapb.Peer pending_peers = 4; +} + +message ScanRegionsResponse { + ResponseHeader header = 1; + + // Keep for backword compatibability. + repeated metapb.Region region_metas = 2; + repeated metapb.Peer leaders = 3; + + // Extended region info with down/pending peers. + repeated Region regions = 4; +} + +message GetClusterConfigRequest { + RequestHeader header = 1; +} + +message GetClusterConfigResponse { + ResponseHeader header = 1; + + metapb.Cluster cluster = 2; +} + +message PutClusterConfigRequest { + RequestHeader header = 1; + + metapb.Cluster cluster = 2; +} + +message PutClusterConfigResponse { + ResponseHeader header = 1; +} + +message Member { + // name is the name of the PD member. 
+    string name = 1;
+    // member_id is the unique id of the PD member.
+    uint64 member_id = 2;
+    repeated string peer_urls = 3;
+    repeated string client_urls = 4;
+    int32 leader_priority = 5;
+    string deploy_path = 6;
+    string binary_version = 7;
+    string git_hash = 8;
+}
+
+message GetMembersRequest {
+    RequestHeader header = 1;
+}
+
+message GetMembersResponse {
+    ResponseHeader header = 1;
+
+    repeated Member members = 2;
+    Member leader = 3;
+    Member etcd_leader = 4;
+    map<string, Member> tso_allocator_leaders = 5;
+}
+
+message PeerStats {
+    metapb.Peer peer = 1;
+    uint64 down_seconds = 2;
+}
+
+message RegionHeartbeatRequest {
+    RequestHeader header = 1;
+
+    metapb.Region region = 2;
+    // Leader Peer sending the heartbeat.
+    metapb.Peer leader = 3;
+    // Leader considers that these peers are down.
+    repeated PeerStats down_peers = 4;
+    // Pending peers are the peers that the leader can't consider as
+    // working followers.
+    repeated metapb.Peer pending_peers = 5;
+    // Bytes read/written during this period.
+    uint64 bytes_written = 6;
+    uint64 bytes_read = 7;
+    // Keys read/written during this period.
+    uint64 keys_written = 8;
+    uint64 keys_read = 9;
+    // Approximate region size.
+    uint64 approximate_size = 10;
+    reserved 11;
+    // Actually reported time interval
+    TimeInterval interval = 12;
+    // Approximate number of keys.
+    uint64 approximate_keys = 13;
+    // Term is the term of raft group.
+    uint64 term = 14;
+    replication_modepb.RegionReplicationStatus replication_status = 15;
+}
+
+message ChangePeer {
+    metapb.Peer peer = 1;
+    eraftpb.ConfChangeType change_type = 2;
+}
+
+message ChangePeerV2 {
+    // If changes is empty, it means to exit the joint state.
+    repeated ChangePeer changes = 1;
+}
+
+message TransferLeader {
+    metapb.Peer peer = 1;
+}
+
+message Merge {
+    metapb.Region target = 1;
+}
+
+message SplitRegion {
+    CheckPolicy policy = 1;
+    repeated bytes keys = 2;
+}
+
+enum CheckPolicy {
+    SCAN = 0;
+    APPROXIMATE = 1;
+    USEKEY = 2;
+}
+
+message RegionHeartbeatResponse {
+    ResponseHeader header = 1;
+
+    // Note: PD only allows handling a reported epoch >= the current PD's.
+    // The leader peer reports region status with RegionHeartbeatRequest
+    // to PD regularly; PD will determine whether this region
+    // should do ChangePeer or not.
+    // E.g., max peer number is 3; region A starts with only peer 1.
+    // 1. PD region state -> Peers (1), ConfVer (1).
+    // 2. Leader peer 1 reports region state to PD; PD finds the
+    // peer number is < 3, so it first changes its current region
+    // state -> Peers (1, 2), ConfVer (1), and returns ChangePeer Adding 2.
+    // 3. Leader does ChangePeer, then reports Peers (1, 2), ConfVer (2);
+    // PD updates its state -> Peers (1, 2), ConfVer (2).
+    // 4. Leader may report old Peers (1), ConfVer (1) to PD before the ConfChange
+    // is finished; PD still responds with ChangePeer Adding 2, and of course we must
+    // guarantee the second ChangePeer can't be applied in TiKV.
+    ChangePeer change_peer = 2;
+    // PD can return transfer_leader to let TiKV do the leader transfer itself.
+    TransferLeader transfer_leader = 3;
+    // ID of the region
+    uint64 region_id = 4;
+    metapb.RegionEpoch region_epoch = 5;
+    // Leader of the region at the moment the corresponding request was made.
+    metapb.Peer target_peer = 6;
+    Merge merge = 7;
+    // PD sends split_region to let TiKV split a region into two regions.
+    SplitRegion split_region = 8;
+    // Multiple change peer operations performed atomically.
+ // Note: PD can use both ChangePeer and ChangePeerV2 at the same time + // (not in the same RegionHeartbeatResponse). + // Now, PD use ChangePeerV2 only for replacing peers. + ChangePeerV2 change_peer_v2 = 9; +} + +message AskSplitRequest { + RequestHeader header = 1; + + metapb.Region region = 2; +} + +message AskSplitResponse { + ResponseHeader header = 1; + + // We split the region into two, first uses the origin + // parent region id, and the second uses the new_region_id. + // We must guarantee that the new_region_id is global unique. + uint64 new_region_id = 2; + // The peer ids for the new split region. + repeated uint64 new_peer_ids = 3; +} + +message ReportSplitRequest { + RequestHeader header = 1; + + metapb.Region left = 2; + metapb.Region right = 3; +} + +message ReportSplitResponse { + ResponseHeader header = 1; +} + +message AskBatchSplitRequest { + RequestHeader header = 1; + + metapb.Region region = 2; + uint32 split_count = 3; +} + +message SplitID { + uint64 new_region_id = 1; + repeated uint64 new_peer_ids = 2; +} + +message AskBatchSplitResponse { + ResponseHeader header = 1; + + repeated SplitID ids = 2; +} + +message ReportBatchSplitRequest { + RequestHeader header = 1; + + repeated metapb.Region regions = 2; +} + +message ReportBatchSplitResponse { + ResponseHeader header = 1; +} + +message TimeInterval { + // The unix timestamp in seconds of the start of this period. + uint64 start_timestamp = 1; + // The unix timestamp in seconds of the end of this period. + uint64 end_timestamp = 2; +} + +message RecordPair { + string key = 1; + uint64 value = 2; +} + +message StoreStats { + uint64 store_id = 1; + // Capacity for the store. + uint64 capacity = 2; + // Available size for the store. + uint64 available = 3; + // Total region count in this store. + uint32 region_count = 4; + // Current sending snapshot count. + uint32 sending_snap_count = 5; + // Current receiving snapshot count. + uint32 receiving_snap_count = 6; + // When the store is started (unix timestamp in seconds). + uint32 start_time = 7; + // How many region is applying snapshot. + uint32 applying_snap_count = 8; + // If the store is busy + bool is_busy = 9; + // Actually used space by db + uint64 used_size = 10; + // Bytes written for the store during this period. + uint64 bytes_written = 11; + // Keys written for the store during this period. + uint64 keys_written = 12; + // Bytes read for the store during this period. + uint64 bytes_read = 13; + // Keys read for the store during this period. + uint64 keys_read = 14; + // Actually reported time interval + TimeInterval interval = 15; + // Threads' CPU usages in the store + repeated RecordPair cpu_usages = 16; + // Threads' read disk I/O rates in the store + repeated RecordPair read_io_rates = 17; + // Threads' write disk I/O rates in the store + repeated RecordPair write_io_rates = 18; + // Operations' latencies in the store + repeated RecordPair op_latencies = 19; +} + +message StoreHeartbeatRequest { + RequestHeader header = 1; + + StoreStats stats = 2; +} + +message StoreHeartbeatResponse { + ResponseHeader header = 1; + replication_modepb.ReplicationStatus replication_status = 2; + string cluster_version = 3; +} + +message ScatterRegionRequest { + RequestHeader header = 1; + + uint64 region_id = 2; + + // PD will use these region information if it can't find the region. + // For example, the region is just split and hasn't report to PD yet. 
+ metapb.Region region = 3; + metapb.Peer leader = 4; + + // If group is defined, the regions with the same group would be scattered as a whole group. + // If not defined, the regions would be scattered in a cluster level. + string group = 5; +} + +message ScatterRegionResponse { + ResponseHeader header = 1; +} + +message GetGCSafePointRequest { + RequestHeader header = 1; +} + +message GetGCSafePointResponse { + ResponseHeader header = 1; + + uint64 safe_point = 2; +} + +message UpdateGCSafePointRequest { + RequestHeader header = 1; + + uint64 safe_point = 2; +} + +message UpdateGCSafePointResponse { + ResponseHeader header = 1; + + uint64 new_safe_point = 2; +} + +message UpdateServiceGCSafePointRequest { + RequestHeader header = 1; + + bytes service_id = 2; + int64 TTL = 3; + uint64 safe_point = 4; +} + +message UpdateServiceGCSafePointResponse { + ResponseHeader header = 1; + + bytes service_id = 2; + int64 TTL = 3; + uint64 min_safe_point = 4; +} + +message RegionStat { + // Bytes read/written during this period. + uint64 bytes_written = 1; + uint64 bytes_read = 2; + // Keys read/written during this period. + uint64 keys_written = 3; + uint64 keys_read = 4; +} + +message SyncRegionRequest{ + RequestHeader header = 1; + Member member = 2; + // the follower PD will use the start index to locate historical changes + // that require synchronization. + uint64 start_index = 3; +} + +message SyncRegionResponse{ + ResponseHeader header = 1; + // the leader PD will send the repsonds include + // changed regions records and the index of the first record. + repeated metapb.Region regions = 2; + uint64 start_index = 3; + repeated RegionStat region_stats = 4; + repeated metapb.Peer region_leaders = 5; +} + +message GetOperatorRequest { + RequestHeader header = 1; + uint64 region_id = 2; +} + +enum OperatorStatus { + SUCCESS = 0; + TIMEOUT = 1; + CANCEL = 2; + REPLACE = 3; + RUNNING = 4; +} + +message GetOperatorResponse { + ResponseHeader header = 1; + uint64 region_id = 2; + bytes desc = 3; + OperatorStatus status = 4; + bytes kind = 5; +} + +message SyncMaxTSRequest { + RequestHeader header = 1; + Timestamp max_ts = 2; +} + +message SyncMaxTSResponse { + ResponseHeader header = 1; + Timestamp max_local_ts = 2; + repeated string dcs = 3; +} diff --git a/tikv-client-proto/proto/raft_cmdpb.proto b/tikv-client-proto/proto/raft_cmdpb.proto new file mode 100644 index 00000000..16dd2ada --- /dev/null +++ b/tikv-client-proto/proto/raft_cmdpb.proto @@ -0,0 +1,331 @@ +syntax = "proto3"; +package raft_cmdpb; + +import "metapb.proto"; +import "errorpb.proto"; +import "eraftpb.proto"; +import "import_sstpb.proto"; +import "rustproto.proto"; + +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +message GetRequest { + string cf = 1; + bytes key = 2; +} + +message GetResponse { + bytes value = 1; +} + +message PutRequest { + string cf = 1; + bytes key = 2; + bytes value = 3; +} + +message PutResponse {} + +message DeleteRequest { + string cf = 1; + bytes key = 2; +} + +message DeleteResponse {} + +message DeleteRangeRequest { + string cf = 1; + bytes start_key = 2; + bytes end_key = 3; + bool notify_only = 4; +} + +message DeleteRangeResponse {} + +message SnapRequest {} + +message SnapResponse { + metapb.Region region = 1; +} + +message PrewriteRequest { + bytes key = 1; + bytes value = 2; + bytes lock = 3; +} + +message PrewriteResponse {} + +message IngestSSTRequest { + import_sstpb.SSTMeta sst = 1; +} + +message IngestSSTResponse {} + +message ReadIndexRequest {} + 
+message ReadIndexResponse{ + uint64 read_index = 1; +} + +enum CmdType { + Invalid = 0; + Get = 1; + Put = 3; + Delete = 4; + Snap = 5; + Prewrite = 6; + DeleteRange = 7; + IngestSST = 8; + ReadIndex = 9; +} + +message Request { + CmdType cmd_type = 1; + GetRequest get = 2; + PutRequest put = 4; + DeleteRequest delete = 5; + SnapRequest snap = 6; + PrewriteRequest prewrite = 7; + DeleteRangeRequest delete_range = 8; + IngestSSTRequest ingest_sst = 9; + ReadIndexRequest read_index = 10; +} + +message Response { + CmdType cmd_type = 1; + GetResponse get = 2; + PutResponse put = 4; + DeleteResponse delete = 5; + SnapResponse snap = 6; + PrewriteResponse prewrite = 7; + DeleteRangeResponse delte_range = 8; + IngestSSTResponse ingest_sst = 9; + ReadIndexResponse read_index = 10; +} + +message ChangePeerRequest { + // This can be only called in internal RaftStore now. + eraftpb.ConfChangeType change_type = 1; + metapb.Peer peer = 2; +} + +message ChangePeerResponse { + metapb.Region region = 1; +} + +message ChangePeerV2Request { + repeated ChangePeerRequest changes = 1; +} + +message ChangePeerV2Response { + metapb.Region region = 1; +} + +message SplitRequest { + // This can be only called in internal RaftStore now. + // The split_key must be in the been splitting region. + bytes split_key = 1; + // We split the region into two, first uses the origin + // parent region id, and the second uses the new_region_id. + // We must guarantee that the new_region_id is global unique. + uint64 new_region_id = 2; + // The peer ids for the new split region. + repeated uint64 new_peer_ids = 3; + // If true, right region derive the origin region_id, + // left region use new_region_id. + // Will be ignored in batch split, use `BatchSplitRequest::right_derive` instead. + bool right_derive = 4 [deprecated=true]; +} + +message SplitResponse { + metapb.Region left = 1; + metapb.Region right = 2; +} + +message BatchSplitRequest { + repeated SplitRequest requests = 1; + // If true, the last region derive the origin region_id, + // other regions use new ids. + bool right_derive = 2; +} + +message BatchSplitResponse { + repeated metapb.Region regions = 1; +} + +message CompactLogRequest { + uint64 compact_index = 1; + uint64 compact_term = 2; +} + +message CompactLogResponse {} + +message TransferLeaderRequest { + metapb.Peer peer = 1; +} + +message TransferLeaderResponse {} + +message ComputeHashRequest { + bytes context = 1; +} + +message VerifyHashRequest { + uint64 index = 1; + bytes hash = 2; + + bytes context = 3; +} + +message VerifyHashResponse {} + +message PrepareMergeRequest { + uint64 min_index = 1; + metapb.Region target = 2; +} + +message PrepareMergeResponse {} + +message CommitMergeRequest { + metapb.Region source = 1; + uint64 commit = 2; + repeated eraftpb.Entry entries = 3; +} + +message CommitMergeResponse {} + +message RollbackMergeRequest { + uint64 commit = 1; +} + +message RollbackMergeResponse {} + +enum AdminCmdType { + InvalidAdmin = 0; + ChangePeer = 1; + // Use `BatchSplit` instead. 
+ Split = 2 [deprecated=true]; + CompactLog = 3; + TransferLeader = 4; + ComputeHash = 5; + VerifyHash = 6; + PrepareMerge = 7; + CommitMerge = 8; + RollbackMerge = 9; + BatchSplit = 10; + ChangePeerV2 = 11; +} + +message AdminRequest { + AdminCmdType cmd_type = 1; + ChangePeerRequest change_peer = 2; + SplitRequest split = 3 [deprecated=true]; + CompactLogRequest compact_log = 4; + TransferLeaderRequest transfer_leader = 5; + VerifyHashRequest verify_hash = 6; + PrepareMergeRequest prepare_merge = 7; + CommitMergeRequest commit_merge = 8; + RollbackMergeRequest rollback_merge = 9; + BatchSplitRequest splits = 10; + ChangePeerV2Request change_peer_v2 = 11; + ComputeHashRequest compute_hash = 12; +} + +message AdminResponse { + AdminCmdType cmd_type = 1; + ChangePeerResponse change_peer = 2; + SplitResponse split = 3 [deprecated=true]; + CompactLogResponse compact_log = 4; + TransferLeaderResponse transfer_leader = 5; + VerifyHashResponse verify_hash = 6; + PrepareMergeResponse prepare_merge = 7; + CommitMergeResponse commit_merge = 8; + RollbackMergeResponse rollback_merge = 9; + BatchSplitResponse splits = 10; + ChangePeerV2Response change_peer_v2 = 11; +} + +// For get the leader of the region. +message RegionLeaderRequest {} + +message RegionLeaderResponse { + metapb.Peer leader = 1; +} + +// For getting more information of the region. +// We add some admin operations (ChangePeer, Split...) into the pb job list, +// then pd server will peek the first one, handle it and then pop it from the job lib. +// But sometimes, the pd server may crash before popping. When another pd server +// starts and finds the job is running but not finished, it will first check whether +// the raft server already has handled this job. +// E,g, for ChangePeer, if we add Peer10 into region1 and find region1 has already had +// Peer10, we can think this ChangePeer is finished, and can pop this job from job list +// directly. +message RegionDetailRequest {} + +message RegionDetailResponse { + metapb.Region region = 1; + metapb.Peer leader = 2; +} + + +enum StatusCmdType { + InvalidStatus = 0; + RegionLeader = 1; + RegionDetail = 2; +} + +message StatusRequest { + StatusCmdType cmd_type = 1; + RegionLeaderRequest region_leader = 2; + RegionDetailRequest region_detail = 3; +} + +message StatusResponse { + StatusCmdType cmd_type = 1; + RegionLeaderResponse region_leader = 2; + RegionDetailResponse region_detail = 3; +} + +message RaftRequestHeader { + uint64 region_id = 1; + metapb.Peer peer = 2; + // true for read linearization + bool read_quorum = 3; + // 16 bytes, to distinguish request. + bytes uuid = 4; + + metapb.RegionEpoch region_epoch = 5; + uint64 term = 6; + + bool sync_log = 7; + bool replica_read = 8; + + // Read requests can be responsed directly after the Raft applys to `applied_index`. + uint64 applied_index = 9; +} + +message RaftResponseHeader { + errorpb.Error error = 1; + bytes uuid = 2; + uint64 current_term = 3; +} + +message RaftCmdRequest { + RaftRequestHeader header = 1; + // We can't enclose normal requests and administrator request + // at same time. 
+ repeated Request requests = 2; + AdminRequest admin_request = 3; + StatusRequest status_request = 4; +} + +message RaftCmdResponse { + RaftResponseHeader header = 1; + repeated Response responses = 2; + AdminResponse admin_response = 3; + StatusResponse status_response = 4; +} diff --git a/tikv-client-proto/proto/raft_serverpb.proto b/tikv-client-proto/proto/raft_serverpb.proto new file mode 100644 index 00000000..01d9ab56 --- /dev/null +++ b/tikv-client-proto/proto/raft_serverpb.proto @@ -0,0 +1,111 @@ +syntax = "proto3"; +package raft_serverpb; + +import "eraftpb.proto"; +import "metapb.proto"; +import "rustproto.proto"; + +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +message RaftMessage { + uint64 region_id = 1; + metapb.Peer from_peer = 2; + metapb.Peer to_peer = 3; + eraftpb.Message message = 4; + metapb.RegionEpoch region_epoch = 5; + // true means to_peer is a tombstone peer and it should remove itself. + bool is_tombstone = 6; + // Region key range [start_key, end_key). + bytes start_key = 7; + bytes end_key = 8; + // If it has value, to_peer should be removed if merge is never going to complete. + metapb.Region merge_target = 9; + ExtraMessage extra_msg = 10; +} + +message RaftTruncatedState { + uint64 index = 1; + uint64 term = 2; +} + +message SnapshotCFFile { + string cf = 1; + uint64 size = 2; + uint32 checksum = 3; +} + +message SnapshotMeta { + repeated SnapshotCFFile cf_files = 1; +} + +message SnapshotChunk { + RaftMessage message = 1; + bytes data = 2; +} + +message Done {} + +message KeyValue { + bytes key = 1; + bytes value = 2; +} + +message RaftSnapshotData { + metapb.Region region = 1; + uint64 file_size = 2; + repeated KeyValue data = 3; + uint64 version = 4; + SnapshotMeta meta = 5; +} + +message StoreIdent { + uint64 cluster_id = 1; + uint64 store_id = 2; +} + +message RaftLocalState { + eraftpb.HardState hard_state = 1; + uint64 last_index = 2; +} + +message RaftApplyState { + uint64 applied_index = 1; + uint64 last_commit_index = 3; + uint64 commit_index = 4; + uint64 commit_term = 5; + RaftTruncatedState truncated_state = 2; +} + +enum PeerState { + Normal = 0; + Applying = 1; + Tombstone = 2; + Merging = 3; +} + +message MergeState { + uint64 min_index = 1; + metapb.Region target = 2; + uint64 commit = 3; +} + +message RegionLocalState { + PeerState state = 1; + metapb.Region region = 2; + MergeState merge_state = 3; +} + +enum ExtraMessageType { + MsgRegionWakeUp = 0; + MsgWantRollbackMerge = 1; + MsgCheckStalePeer = 2; + MsgCheckStalePeerResponse = 3; +} + +message ExtraMessage { + ExtraMessageType type = 1; + uint64 premerge_commit = 2; + repeated metapb.Peer check_peers = 3; +} diff --git a/tikv-client-proto/proto/replication_modepb.proto b/tikv-client-proto/proto/replication_modepb.proto new file mode 100644 index 00000000..9bfb8814 --- /dev/null +++ b/tikv-client-proto/proto/replication_modepb.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package replication_modepb; + +enum ReplicationMode { + // The standard mode. Replicate logs to majority peer. + MAJORITY = 0; + // DR mode. Replicate logs among 2 DCs. + DR_AUTO_SYNC = 1; +} + +// The replication status sync from PD to TiKV. +message ReplicationStatus { + ReplicationMode mode = 1; + DRAutoSync dr_auto_sync = 2; +} + +enum DRAutoSyncState { + // Raft logs need to sync between different DCs + SYNC = 0; + // Raft logs need to sync to majority peers + ASYNC = 1; + // Switching from ASYNC to SYNC mode + SYNC_RECOVER = 2; +} + +// The status of dr-autosync mode. 
+message DRAutoSync { + // The key of the label that used for distinguish different DC. + string label_key = 1; + DRAutoSyncState state = 2; + // Unique ID of the state, it increases after each state transfer. + uint64 state_id = 3; + // Duration to wait before switching to SYNC by force (in seconds) + int32 wait_sync_timeout_hint = 4; +} + +enum RegionReplicationState { + // The region's state is unknown + UNKNOWN = 0; + // Logs sync to majority peers + SIMPLE_MAJORITY = 1; + // Logs sync to different DCs + INTEGRITY_OVER_LABEL = 2; +} + +// The replication status sync from TiKV to PD. +message RegionReplicationStatus { + RegionReplicationState state = 1; + // Unique ID of the state, it increases after each state transfer. + uint64 state_id = 2; +} diff --git a/tikv-client-proto/proto/span.proto b/tikv-client-proto/proto/span.proto new file mode 100644 index 00000000..16bfbbcc --- /dev/null +++ b/tikv-client-proto/proto/span.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package span; + +message SpanSet { + uint64 start_time_ns = 1; + uint64 cycles_per_sec = 2; + repeated Span spans = 3; + uint64 create_time_ns = 4; +} + +message Root {} +message Parent { uint64 id = 1; } +message Continue { uint64 id = 1; } + +message Link { + oneof link { + Root root = 1; + Parent parent = 2; + Continue continue = 3; + } +} + +message Span { + uint64 id = 1; + Link link = 2; + uint64 begin_cycles = 3; + uint64 end_cycles = 4; + uint32 event = 5; +} diff --git a/tikv-client-proto/proto/tikvpb.proto b/tikv-client-proto/proto/tikvpb.proto new file mode 100644 index 00000000..4cddc597 --- /dev/null +++ b/tikv-client-proto/proto/tikvpb.proto @@ -0,0 +1,213 @@ +syntax = "proto3"; +package tikvpb; + +import "coprocessor.proto"; +import "kvrpcpb.proto"; +import "mpp.proto"; +import "raft_serverpb.proto"; + +import "gogoproto/gogo.proto"; +import "rustproto.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (rustproto.lite_runtime_all) = true; + +option java_package = "org.tikv.kvproto"; + +// Key/value store API for TiKV. +service Tikv { + // Commands using a transactional interface. 
+    rpc KvGet(kvrpcpb.GetRequest) returns (kvrpcpb.GetResponse) {}
+    rpc KvScan(kvrpcpb.ScanRequest) returns (kvrpcpb.ScanResponse) {}
+    rpc KvPrewrite(kvrpcpb.PrewriteRequest) returns (kvrpcpb.PrewriteResponse) {}
+    rpc KvPessimisticLock(kvrpcpb.PessimisticLockRequest) returns (kvrpcpb.PessimisticLockResponse) {}
+    rpc KVPessimisticRollback(kvrpcpb.PessimisticRollbackRequest) returns (kvrpcpb.PessimisticRollbackResponse) {}
+    rpc KvTxnHeartBeat(kvrpcpb.TxnHeartBeatRequest) returns (kvrpcpb.TxnHeartBeatResponse) {}
+    rpc KvCheckTxnStatus(kvrpcpb.CheckTxnStatusRequest) returns (kvrpcpb.CheckTxnStatusResponse) {}
+    rpc KvCheckSecondaryLocks(kvrpcpb.CheckSecondaryLocksRequest) returns (kvrpcpb.CheckSecondaryLocksResponse) {}
+    rpc KvCommit(kvrpcpb.CommitRequest) returns (kvrpcpb.CommitResponse) {}
+    rpc KvImport(kvrpcpb.ImportRequest) returns (kvrpcpb.ImportResponse) {}
+    rpc KvCleanup(kvrpcpb.CleanupRequest) returns (kvrpcpb.CleanupResponse) {}
+    rpc KvBatchGet(kvrpcpb.BatchGetRequest) returns (kvrpcpb.BatchGetResponse) {}
+    rpc KvBatchRollback(kvrpcpb.BatchRollbackRequest) returns (kvrpcpb.BatchRollbackResponse) {}
+    rpc KvScanLock(kvrpcpb.ScanLockRequest) returns (kvrpcpb.ScanLockResponse) {}
+    rpc KvResolveLock(kvrpcpb.ResolveLockRequest) returns (kvrpcpb.ResolveLockResponse) {}
+    rpc KvGC(kvrpcpb.GCRequest) returns (kvrpcpb.GCResponse) {}
+    rpc KvDeleteRange(kvrpcpb.DeleteRangeRequest) returns (kvrpcpb.DeleteRangeResponse) {}
+
+    // Raw commands; no transaction support.
+    rpc RawGet(kvrpcpb.RawGetRequest) returns (kvrpcpb.RawGetResponse) {}
+    rpc RawBatchGet(kvrpcpb.RawBatchGetRequest) returns (kvrpcpb.RawBatchGetResponse) {}
+    rpc RawPut(kvrpcpb.RawPutRequest) returns (kvrpcpb.RawPutResponse) {}
+    rpc RawBatchPut(kvrpcpb.RawBatchPutRequest) returns (kvrpcpb.RawBatchPutResponse) {}
+    rpc RawDelete(kvrpcpb.RawDeleteRequest) returns (kvrpcpb.RawDeleteResponse) {}
+    rpc RawBatchDelete(kvrpcpb.RawBatchDeleteRequest) returns (kvrpcpb.RawBatchDeleteResponse) {}
+    rpc RawScan(kvrpcpb.RawScanRequest) returns (kvrpcpb.RawScanResponse) {}
+    rpc RawDeleteRange(kvrpcpb.RawDeleteRangeRequest) returns (kvrpcpb.RawDeleteRangeResponse) {}
+    rpc RawBatchScan(kvrpcpb.RawBatchScanRequest) returns (kvrpcpb.RawBatchScanResponse) {}
+
+    // VerKV commands.
+    rpc VerGet(kvrpcpb.VerGetRequest) returns (kvrpcpb.VerGetResponse) {}
+    rpc VerBatchGet(kvrpcpb.VerBatchGetRequest) returns (kvrpcpb.VerBatchGetResponse) {}
+    rpc VerMut(kvrpcpb.VerMutRequest) returns (kvrpcpb.VerMutResponse) {}
+    rpc VerBatchMut(kvrpcpb.VerBatchMutRequest) returns (kvrpcpb.VerBatchMutResponse) {}
+    rpc VerScan(kvrpcpb.VerScanRequest) returns (kvrpcpb.VerScanResponse) {}
+    rpc VerDeleteRange(kvrpcpb.VerDeleteRangeRequest) returns (kvrpcpb.VerDeleteRangeResponse) {}
+
+    // Store commands (sent to each TiKV node in a cluster, rather than a certain region).
+    rpc UnsafeDestroyRange(kvrpcpb.UnsafeDestroyRangeRequest) returns (kvrpcpb.UnsafeDestroyRangeResponse) {}
+    rpc RegisterLockObserver(kvrpcpb.RegisterLockObserverRequest) returns (kvrpcpb.RegisterLockObserverResponse) {}
+    rpc CheckLockObserver(kvrpcpb.CheckLockObserverRequest) returns (kvrpcpb.CheckLockObserverResponse) {}
+    rpc RemoveLockObserver(kvrpcpb.RemoveLockObserverRequest) returns (kvrpcpb.RemoveLockObserverResponse) {}
+    rpc PhysicalScanLock(kvrpcpb.PhysicalScanLockRequest) returns (kvrpcpb.PhysicalScanLockResponse) {}
+
+    // Commands for executing SQL in the TiKV coprocessor (i.e., 'pushed down' to TiKV rather than
+    // executed in TiDB).
+ rpc Coprocessor(coprocessor.Request) returns (coprocessor.Response) {} + rpc CoprocessorStream(coprocessor.Request) returns (stream coprocessor.Response) {} + rpc BatchCoprocessor(coprocessor.BatchRequest) returns (stream coprocessor.BatchResponse) {} + + // Raft commands (sent between TiKV nodes). + rpc Raft(stream raft_serverpb.RaftMessage) returns (raft_serverpb.Done) {} + rpc BatchRaft(stream BatchRaftMessage) returns (raft_serverpb.Done) {} + rpc Snapshot(stream raft_serverpb.SnapshotChunk) returns (raft_serverpb.Done) {} + + // Sent from PD or TiDB to a TiKV node. + rpc SplitRegion (kvrpcpb.SplitRegionRequest) returns (kvrpcpb.SplitRegionResponse) {} + // Sent from TiFlash or TiKV to a TiKV node. + rpc ReadIndex(kvrpcpb.ReadIndexRequest) returns (kvrpcpb.ReadIndexResponse) {} + + // Commands for debugging transactions. + rpc MvccGetByKey(kvrpcpb.MvccGetByKeyRequest) returns (kvrpcpb.MvccGetByKeyResponse) {} + rpc MvccGetByStartTs(kvrpcpb.MvccGetByStartTsRequest) returns (kvrpcpb.MvccGetByStartTsResponse) {} + + // Batched commands. + rpc BatchCommands(stream BatchCommandsRequest) returns (stream BatchCommandsResponse) {} + + // These are for mpp execution. + rpc DispatchMPPTask(mpp.DispatchTaskRequest) returns (mpp.DispatchTaskResponse) {} + rpc CancelMPPTask(mpp.CancelTaskRequest) returns (mpp.CancelTaskResponse) {} + rpc EstablishMPPConnection(mpp.EstablishMPPConnectionRequest) returns (stream mpp.MPPDataPacket) {} +} + +message BatchCommandsRequest { + repeated Request requests = 1; + repeated uint64 request_ids = 2; + + message Request { + oneof cmd { + kvrpcpb.GetRequest Get = 1; + kvrpcpb.ScanRequest Scan = 2; + kvrpcpb.PrewriteRequest Prewrite = 3; + kvrpcpb.CommitRequest Commit = 4; + kvrpcpb.ImportRequest Import = 5; + kvrpcpb.CleanupRequest Cleanup = 6; + kvrpcpb.BatchGetRequest BatchGet = 7; + kvrpcpb.BatchRollbackRequest BatchRollback = 8; + kvrpcpb.ScanLockRequest ScanLock = 9; + kvrpcpb.ResolveLockRequest ResolveLock = 10; + kvrpcpb.GCRequest GC = 11; + kvrpcpb.DeleteRangeRequest DeleteRange = 12; + + kvrpcpb.RawGetRequest RawGet = 13; + kvrpcpb.RawBatchGetRequest RawBatchGet = 14; + kvrpcpb.RawPutRequest RawPut = 15; + kvrpcpb.RawBatchPutRequest RawBatchPut = 16; + kvrpcpb.RawDeleteRequest RawDelete = 17; + kvrpcpb.RawBatchDeleteRequest RawBatchDelete = 18; + kvrpcpb.RawScanRequest RawScan = 19; + kvrpcpb.RawDeleteRangeRequest RawDeleteRange = 20; + kvrpcpb.RawBatchScanRequest RawBatchScan = 21; + + coprocessor.Request Coprocessor = 22; + kvrpcpb.PessimisticLockRequest PessimisticLock = 23; + kvrpcpb.PessimisticRollbackRequest PessimisticRollback = 24; + + kvrpcpb.CheckTxnStatusRequest CheckTxnStatus = 25; + kvrpcpb.TxnHeartBeatRequest TxnHeartBeat = 26; + + kvrpcpb.VerGetRequest VerGet = 27; + kvrpcpb.VerBatchGetRequest VerBatchGet = 28; + kvrpcpb.VerMutRequest VerMut = 29; + kvrpcpb.VerBatchMutRequest VerBatchMut = 30; + kvrpcpb.VerScanRequest VerScan = 31; + kvrpcpb.VerDeleteRangeRequest VerDeleteRange = 32; + + kvrpcpb.CheckSecondaryLocksRequest CheckSecondaryLocks = 33; + + // For some test cases. + BatchCommandsEmptyRequest Empty = 255; + } + } +} + +message BatchCommandsResponse { + repeated Response responses = 1; + repeated uint64 request_ids = 2; + // 280 means TiKV gRPC cpu usage is 280%. 
+ uint64 transport_layer_load = 3; + + message Response { + oneof cmd { + kvrpcpb.GetResponse Get = 1; + kvrpcpb.ScanResponse Scan = 2; + kvrpcpb.PrewriteResponse Prewrite = 3; + kvrpcpb.CommitResponse Commit = 4; + kvrpcpb.ImportResponse Import = 5; + kvrpcpb.CleanupResponse Cleanup = 6; + kvrpcpb.BatchGetResponse BatchGet = 7; + kvrpcpb.BatchRollbackResponse BatchRollback = 8; + kvrpcpb.ScanLockResponse ScanLock = 9; + kvrpcpb.ResolveLockResponse ResolveLock = 10; + kvrpcpb.GCResponse GC = 11; + kvrpcpb.DeleteRangeResponse DeleteRange = 12; + + kvrpcpb.RawGetResponse RawGet = 13; + kvrpcpb.RawBatchGetResponse RawBatchGet = 14; + kvrpcpb.RawPutResponse RawPut = 15; + kvrpcpb.RawBatchPutResponse RawBatchPut = 16; + kvrpcpb.RawDeleteResponse RawDelete = 17; + kvrpcpb.RawBatchDeleteResponse RawBatchDelete = 18; + kvrpcpb.RawScanResponse RawScan = 19; + kvrpcpb.RawDeleteRangeResponse RawDeleteRange = 20; + kvrpcpb.RawBatchScanResponse RawBatchScan = 21; + + coprocessor.Response Coprocessor = 22; + kvrpcpb.PessimisticLockResponse PessimisticLock = 23; + kvrpcpb.PessimisticRollbackResponse PessimisticRollback = 24; + + kvrpcpb.CheckTxnStatusResponse CheckTxnStatus = 25; + kvrpcpb.TxnHeartBeatResponse TxnHeartBeat = 26; + + kvrpcpb.VerGetResponse VerGet = 27; + kvrpcpb.VerBatchGetResponse VerBatchGet = 28; + kvrpcpb.VerMutResponse VerMut = 29; + kvrpcpb.VerBatchMutResponse VerBatchMut = 30; + kvrpcpb.VerScanResponse VerScan = 31; + kvrpcpb.VerDeleteRangeResponse VerDeleteRange = 32; + + kvrpcpb.CheckSecondaryLocksResponse CheckSecondaryLocks = 33; + + // For some test cases. + BatchCommandsEmptyResponse Empty = 255; + } + } +} + +message BatchRaftMessage { + repeated raft_serverpb.RaftMessage msgs = 1; +} + +message BatchCommandsEmptyRequest { + // ID of the test request. + uint64 test_id = 1; + // TiKV needs to delay at least such a time to response the client. + uint64 delay_time = 2; +} + +message BatchCommandsEmptyResponse { + // ID of the test request. + uint64 test_id = 1; +} diff --git a/tikv-client-proto/src/lib.rs b/tikv-client-proto/src/lib.rs new file mode 100644 index 00000000..0a9a84af --- /dev/null +++ b/tikv-client-proto/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. 
+
+use protos::*;
+pub use protos::{coprocessor, errorpb, kvrpcpb, metapb, mpp, pdpb, raft_serverpb, tikvpb};
+
+#[allow(dead_code)]
+#[allow(clippy::all)]
+mod protos {
+    include!(concat!(env!("OUT_DIR"), "/protos/mod.rs"));
+}
diff --git a/tikv-client-store/Cargo.toml b/tikv-client-store/Cargo.toml
index 30a1022c..ecf83b0d 100644
--- a/tikv-client-store/Cargo.toml
+++ b/tikv-client-store/Cargo.toml
@@ -8,6 +8,6 @@ async-trait = "0.1"
 derive-new = "0.5"
 futures = { version = "0.3.5", features = ["compat", "async-await", "thread-pool"] }
 grpcio = { version = "0.6", features = ["secure", "prost-codec"], default-features = false }
-kvproto = { git = "https://github.com/pingcap/kvproto.git", features = ["prost-codec"], default-features = false }
 log = "0.4"
 tikv-client-common = { path = "../tikv-client-common" }
+tikv-client-proto = { path = "../tikv-client-proto" }
diff --git a/tikv-client-store/src/client.rs b/tikv-client-store/src/client.rs
index bdf03106..8c120a96 100644
--- a/tikv-client-store/src/client.rs
+++ b/tikv-client-store/src/client.rs
@@ -4,8 +4,8 @@ use crate::{request::Request, Region, Result, SecurityManager};
 use async_trait::async_trait;
 use derive_new::new;
 use grpcio::{CallOption, Environment};
-use kvproto::tikvpb::TikvClient;
 use std::{any::Any, sync::Arc, time::Duration};
+use tikv_client_proto::tikvpb::TikvClient;
 
 /// A trait for connecting to TiKV stores.
 pub trait KvConnect: Sized + Send + Sync + 'static {
diff --git a/tikv-client-store/src/errors.rs b/tikv-client-store/src/errors.rs
index 6c90d93e..5b0200b7 100644
--- a/tikv-client-store/src/errors.rs
+++ b/tikv-client-store/src/errors.rs
@@ -1,7 +1,7 @@
 // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
 
 use crate::Error;
-use kvproto::kvrpcpb;
+use tikv_client_proto::kvrpcpb;
 
 pub trait HasRegionError {
     fn region_error(&mut self) -> Option<Error>;
diff --git a/tikv-client-store/src/region.rs b/tikv-client-store/src/region.rs
index 7504754e..a8c22f2a 100644
--- a/tikv-client-store/src/region.rs
+++ b/tikv-client-store/src/region.rs
@@ -1,6 +1,6 @@
 use crate::{Error, Key, Result};
 use derive_new::new;
-use kvproto::{kvrpcpb, metapb};
+use tikv_client_proto::{kvrpcpb, metapb};
 
 pub type RegionId = u64;
 pub type StoreId = u64;
diff --git a/tikv-client-store/src/request.rs b/tikv-client-store/src/request.rs
index 53c66e55..edefbb1a 100644
--- a/tikv-client-store/src/request.rs
+++ b/tikv-client-store/src/request.rs
@@ -3,8 +3,8 @@
 use crate::{ErrorKind, Result};
 use async_trait::async_trait;
 use grpcio::CallOption;
-use kvproto::{kvrpcpb, tikvpb::TikvClient};
 use std::any::Any;
+use tikv_client_proto::{kvrpcpb, tikvpb::TikvClient};
 
 #[async_trait]
 pub trait Request: Any + Sync + Send + 'static {
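
Note for code that consumes these crates: the change is almost entirely a rename of import paths, since `tikv-client-proto` re-exports the same generated modules (`kvrpcpb`, `metapb`, `pdpb`, `tikvpb`, ...) that were previously pulled in through `kvproto`. The sketch below is illustrative only, not part of this change: `key_in_region` is a hypothetical helper, and the code assumes the prost-generated `metapb::Region` exposes plain public fields with a `Default` impl. It encodes the `[start_key, end_key)` convention documented in metapb.proto and pdpb.proto above, where an empty end key means the range is unbounded.

```rust
// Hypothetical helper (not part of this PR): checks whether `key` falls in a
// region's `[start_key, end_key)` range, treating an empty `end_key` as +inf,
// which is the convention described in metapb.proto / pdpb.proto.
use tikv_client_proto::metapb;

fn key_in_region(key: &[u8], region: &metapb::Region) -> bool {
    // Keys are compared lexicographically as raw bytes.
    let after_start = key >= region.start_key.as_slice();
    let before_end = region.end_key.is_empty() || key < region.end_key.as_slice();
    after_start && before_end
}

fn main() {
    // Build a region covering ["b", "m") purely for demonstration; the struct
    // literal syntax assumes prost-style generated code with public fields.
    let region = metapb::Region {
        id: 1,
        start_key: b"b".to_vec(),
        end_key: b"m".to_vec(),
        ..Default::default()
    };

    assert!(key_in_region(b"cat", &region));
    assert!(!key_in_region(b"zebra", &region));
}
```

This kind of range check is what a client ultimately relies on when deciding which region should receive a key-based request (for example a `kvrpcpb.RawGetRequest` routed through `tikvpb.Tikv/RawGet`).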