//! A JSON-RPC 1.0 & 2.0 endpoint for Zebra.
//!
//! This endpoint is compatible with clients that incorrectly send
//! `"jsonrpc": 1.0` fields in JSON-RPC 1.0 requests,
//! such as `lightwalletd`.
//!
//! See the full list of
//! [differences between JSON-RPC 1.0 and 2.0](https://www.simple-is-better.org/rpc/#differences-between-1-0-and-2-0).
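//!
//! For example, the endpoint still accepts a 1.0-style request that carries an
//! extra version field, such as the illustrative call
//! `{"jsonrpc": 1.0, "id": 1, "method": "getinfo", "params": []}`.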

use std::{fmt, panic, thread::available_parallelism};

use cookie::Cookie;
use http_request_compatibility::With;
use jsonrpc_core::{Compatibility, MetaIoHandler};
use jsonrpc_http_server::{CloseHandle, ServerBuilder};
use tokio::task::JoinHandle;
use tower::Service;
use tracing::*;

use zebra_chain::{
    block, chain_sync_status::ChainSyncStatus, chain_tip::ChainTip, parameters::Network,
};
use zebra_network::AddressBookPeers;
use zebra_node_services::mempool;

use crate::{
    config::Config,
    methods::{Rpc, RpcImpl},
    server::{
        http_request_compatibility::HttpRequestMiddleware,
        rpc_call_compatibility::FixRpcResponseMiddleware,
    },
};

#[cfg(feature = "getblocktemplate-rpcs")]
use crate::methods::{GetBlockTemplateRpc, GetBlockTemplateRpcImpl};

pub mod cookie;
pub mod http_request_compatibility;
pub mod rpc_call_compatibility;

#[cfg(test)]
mod tests;

/// Zebra RPC Server
#[derive(Clone)]
pub struct RpcServer {
    /// The RPC config.
    config: Config,

    /// The configured network.
    network: Network,

    /// Zebra's application version, with build metadata.
    build_version: String,

    /// A handle that shuts down the RPC server.
    close_handle: CloseHandle,
}

impl fmt::Debug for RpcServer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RpcServer")
            .field("config", &self.config)
            .field("network", &self.network)
            .field("build_version", &self.build_version)
            .field(
                "close_handle",
                // TODO: when it stabilises, use std::any::type_name_of_val(&self.close_handle)
                &"CloseHandle",
            )
            .finish()
    }
}

/// The prefix of the log message that reports the RPC server's listen address.
pub const OPENED_RPC_ENDPOINT_MSG: &str = "Opened RPC endpoint at ";

impl RpcServer {
    /// Start a new RPC server endpoint using the supplied configs and services.
    ///
    /// `build_version` and `user_agent` are version strings for the application,
    /// which are used in RPC responses.
    ///
    /// Returns [`JoinHandle`]s for the RPC server and `sendrawtransaction` queue tasks,
    /// and an optional [`RpcServer`] handle, which can be used to shut down the RPC server task.
    /// The handle is `None` when no `listen_addr` is configured.
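    ///
    /// A sketch of how a caller might wire up the returned handles; the
    /// `config`, `mining_config`, version strings, and service values below are
    /// hypothetical placeholders, so the example is not compiled:
    ///
    /// ```ignore
    /// // `config`, `mining_config`, and the service values are placeholders.
    /// let (rpc_task, rpc_tx_queue_task, rpc_server) = RpcServer::spawn(
    ///     config,
    ///     mining_config,
    ///     "v1.0.0",
    ///     "/Zebra:1.0.0/",
    ///     mempool,
    ///     state,
    ///     block_verifier_router,
    ///     sync_status,
    ///     address_book,
    ///     latest_chain_tip,
    ///     network,
    /// );
    ///
    /// // Ask the server to shut down, then wait for its task to exit.
    /// if let Some(server) = rpc_server {
    ///     server.shutdown().await?;
    /// }
    /// rpc_task.await?;
    /// ```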
    //
    // TODO:
    // - put some of the configs or services in their own struct?
    // - replace VersionString with semver::Version, and update the tests to provide valid versions
    #[allow(clippy::too_many_arguments)]
    pub fn spawn<
        VersionString,
        UserAgentString,
        Mempool,
        State,
        Tip,
        BlockVerifierRouter,
        SyncStatus,
        AddressBook,
    >(
        config: Config,
        #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
        mining_config: crate::config::mining::Config,
        build_version: VersionString,
        user_agent: UserAgentString,
        mempool: Mempool,
        state: State,
        #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
        block_verifier_router: BlockVerifierRouter,
        #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
        sync_status: SyncStatus,
        #[cfg_attr(not(feature = "getblocktemplate-rpcs"), allow(unused_variables))]
        address_book: AddressBook,
        latest_chain_tip: Tip,
        network: Network,
    ) -> (JoinHandle<()>, JoinHandle<()>, Option<Self>)
    where
        VersionString: ToString + Clone + Send + 'static,
        UserAgentString: ToString + Clone + Send + 'static,
        Mempool: tower::Service<
                mempool::Request,
                Response = mempool::Response,
                Error = zebra_node_services::BoxError,
            > + Clone
            + Send
            + Sync
            + 'static,
        Mempool::Future: Send,
        State: Service<
                zebra_state::ReadRequest,
                Response = zebra_state::ReadResponse,
                Error = zebra_state::BoxError,
            > + Clone
            + Send
            + Sync
            + 'static,
        State::Future: Send,
        Tip: ChainTip + Clone + Send + Sync + 'static,
        BlockVerifierRouter: Service<
                zebra_consensus::Request,
                Response = block::Hash,
                Error = zebra_consensus::BoxError,
            > + Clone
            + Send
            + Sync
            + 'static,
        <BlockVerifierRouter as Service<zebra_consensus::Request>>::Future: Send,
        SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
        AddressBook: AddressBookPeers + Clone + Send + Sync + 'static,
    {
        if let Some(listen_addr) = config.listen_addr {
            info!("Trying to open RPC endpoint at {}...", listen_addr,);

            // Create a handler compatible with the JSON-RPC 1.0 and 2.0 protocols
            let mut io: MetaIoHandler<(), _> =
                MetaIoHandler::new(Compatibility::Both, FixRpcResponseMiddleware);

            #[cfg(feature = "getblocktemplate-rpcs")]
            {
                // Initialize the getblocktemplate rpc method handler
                let get_block_template_rpc_impl = GetBlockTemplateRpcImpl::new(
                    &network,
                    mining_config.clone(),
                    mempool.clone(),
                    state.clone(),
                    latest_chain_tip.clone(),
                    block_verifier_router,
                    sync_status,
                    address_book,
                );

                io.extend_with(get_block_template_rpc_impl.to_delegate());
            }

            // Initialize the rpc methods with the zebra version
            let (rpc_impl, rpc_tx_queue_task_handle) = RpcImpl::new(
                build_version.clone(),
                user_agent,
                network.clone(),
                config.debug_force_finished_sync,
                #[cfg(feature = "getblocktemplate-rpcs")]
                mining_config.debug_like_zcashd,
                #[cfg(not(feature = "getblocktemplate-rpcs"))]
                true,
                mempool,
                state,
                latest_chain_tip,
            );

            io.extend_with(rpc_impl.to_delegate());

            // If zero, automatically scale threads to the number of CPU cores
            let mut parallel_cpu_threads = config.parallel_cpu_threads;
            if parallel_cpu_threads == 0 {
                parallel_cpu_threads = available_parallelism().map(usize::from).unwrap_or(1);
            }

            // The server is a blocking task, which blocks on executor shutdown.
            // So we need to start it in a std::thread.
            // (Otherwise tokio panics on RPC port conflict, which shuts down the RPC server.)
            let span = Span::current();
            let start_server = move || {
                span.in_scope(|| {
                    let middleware = if config.enable_cookie_auth {
                        let cookie = Cookie::default();
                        cookie::write_to_disk(&cookie, &config.cookie_dir)
                            .expect("Zebra must be able to write the auth cookie to the disk");
                        HttpRequestMiddleware::default().with(cookie)
                    } else {
                        HttpRequestMiddleware::default()
                    };

                    // Use a different tokio executor from the rest of Zebra,
                    // so that large RPCs and any task handling bugs don't impact Zebra.
                    let server_instance = ServerBuilder::new(io)
                        .threads(parallel_cpu_threads)
                        // TODO: disable this security check if we see errors from lightwalletd
                        //.allowed_hosts(DomainsValidation::Disabled)
                        .request_middleware(middleware)
                        .start_http(&listen_addr)
                        .expect("Unable to start RPC server");

                    info!("{OPENED_RPC_ENDPOINT_MSG}{}", server_instance.address());

                    let close_handle = server_instance.close_handle();

                    let rpc_server_handle = RpcServer {
                        config,
                        network,
                        build_version: build_version.to_string(),
                        close_handle,
                    };

                    (server_instance, rpc_server_handle)
                })
            };

            // Propagate panics from the std::thread
            let (server_instance, rpc_server_handle) = match std::thread::spawn(start_server).join()
            {
                Ok(rpc_server) => rpc_server,
                Err(panic_object) => panic::resume_unwind(panic_object),
            };

            // The server is a blocking task, which blocks on executor shutdown.
            // So we need to wait on it on a std::thread, inside a tokio blocking task.
            // (Otherwise tokio panics when we shut down the RPC server.)
            let span = Span::current();
            let wait_on_server = move || {
                span.in_scope(|| {
                    server_instance.wait();

                    info!("Stopped RPC endpoint");
                })
            };

            let span = Span::current();
            let rpc_server_task_handle = tokio::task::spawn_blocking(move || {
                let thread_handle = std::thread::spawn(wait_on_server);

                // Propagate panics from the inner std::thread to the outer tokio blocking task
                span.in_scope(|| match thread_handle.join() {
                    Ok(()) => (),
                    Err(panic_object) => panic::resume_unwind(panic_object),
                })
            });

            (
                rpc_server_task_handle,
                rpc_tx_queue_task_handle,
                Some(rpc_server_handle),
            )
        } else {
            // There is no RPC port, so the RPC tasks do nothing.
            (
                tokio::task::spawn(futures::future::pending().in_current_span()),
                tokio::task::spawn(futures::future::pending().in_current_span()),
                None,
            )
        }
    }

    /// Shut down this RPC server, blocking the current thread.
    ///
    /// This method can be called from within a tokio executor without panicking.
    /// But it blocks the current thread, so the asynchronous `shutdown()` should be preferred.
    pub fn shutdown_blocking(&self) {
        Self::shutdown_blocking_inner(self.close_handle.clone(), self.config.clone())
    }

    /// Shut down this RPC server asynchronously.
    /// Returns a task that completes when the server is shut down.
    pub fn shutdown(&self) -> JoinHandle<()> {
        let close_handle = self.close_handle.clone();
        let config = self.config.clone();
        let span = Span::current();

        tokio::task::spawn_blocking(move || {
            span.in_scope(|| Self::shutdown_blocking_inner(close_handle, config))
        })
    }

    /// Shuts down this RPC server using its `close_handle`.
    ///
    /// See `shutdown_blocking()` for details.
    fn shutdown_blocking_inner(close_handle: CloseHandle, config: Config) {
        // The server is a blocking task, so it can't run inside a tokio thread.
        // See the note at wait_on_server.
        let span = Span::current();
        let wait_on_shutdown = move || {
            span.in_scope(|| {
                if config.enable_cookie_auth {
                    if let Err(err) = cookie::remove_from_disk(&config.cookie_dir) {
                        warn!(
                            ?err,
                            "unexpectedly could not remove the rpc auth cookie from the disk"
                        )
                    }
                }

                info!("Stopping RPC server");
                close_handle.clone().close();
                debug!("Stopped RPC server");
            })
        };

        let span = Span::current();
        let thread_handle = std::thread::spawn(wait_on_shutdown);

        // Propagate panics from the inner std::thread to the outer tokio blocking task
        span.in_scope(|| match thread_handle.join() {
            Ok(()) => (),
            Err(panic_object) => panic::resume_unwind(panic_object),
        })
    }
}

impl Drop for RpcServer {
    fn drop(&mut self) {
        // Block on shutting down, propagating panics.
        // This can take around 150 seconds.
        //
        // Without this shutdown, Zebra's RPC unit tests sometimes crashed with memory errors.
        self.shutdown_blocking();
    }
}