aboutsummaryrefslogtreecommitdiff
path: root/src/client
diff options
context:
space:
mode:
authorAustin Hellyer <[email protected]>2016-11-26 11:37:18 -0800
committerAustin Hellyer <[email protected]>2016-11-26 11:37:18 -0800
commit77354ab321bec1ff66af0e27eb87a7eec3e3db24 (patch)
tree693b43ae7be07be11426faf6e6282d838e426a04 /src/client
parentMake Cache::get_channel return a reference (diff)
downloadserenity-77354ab321bec1ff66af0e27eb87a7eec3e3db24.tar.xz
serenity-77354ab321bec1ff66af0e27eb87a7eec3e3db24.zip
Add a bit more docs
Diffstat (limited to 'src/client')
-rw-r--r--src/client/gateway/error.rs6
-rw-r--r--src/client/gateway/shard.rs48
-rw-r--r--src/client/mod.rs21
3 files changed, 69 insertions, 6 deletions
diff --git a/src/client/gateway/error.rs b/src/client/gateway/error.rs
index fb44d3f..dc94209 100644
--- a/src/client/gateway/error.rs
+++ b/src/client/gateway/error.rs
@@ -1,8 +1,12 @@
use std::fmt::{self, Display};
+/// An error that occurred while attempting to deal with the gateway.
+///
+/// Note that - from a user standpoint - there should be no situation in which
+/// you manually handle these.
#[derive(Clone, Debug)]
pub enum Error {
- /// The connection closed
+ /// The connection unexpectedly (read: non-cleanly) closed.
Closed(Option<u16>, String),
/// Expected a Hello during a handshake
ExpectedHello,
diff --git a/src/client/gateway/shard.rs b/src/client/gateway/shard.rs
index c35fee2..ab2e561 100644
--- a/src/client/gateway/shard.rs
+++ b/src/client/gateway/shard.rs
@@ -32,6 +32,10 @@ type CurrentPresence = (Option<Game>, OnlineStatus, bool);
/// Refer to the [module-level documentation][module docs] for information on
/// effectively using multiple shards, if you need to.
///
+/// Note that there are additional methods available if you are manually
+/// managing a shard yourself, although they are hidden from the documentation
+/// since there are few use cases for doing so.
+///
/// # Stand-alone shards
///
/// You may instantiate a shard yourself - decoupled from the [`Client`] - if
@@ -63,6 +67,8 @@ pub struct Shard {
shard_info: Option<[u8; 2]>,
token: String,
ws_url: String,
+ /// The voice connections that this Shard is responsible for. The Shard will
+ /// update the voice connections' states.
#[cfg(feature = "voice")]
pub manager: VoiceManager,
}
@@ -159,6 +165,13 @@ impl Shard {
}}, ready, receiver))
}
+ /// Retrieves a copy of the current shard information.
+ ///
+/// The first element is the _current_ shard - 0-indexed - while the second
+/// element is the _total number_ of shards - 1-indexed.
+ ///
+ /// For example, if using 3 shards in total, and if this is shard 1, then it
+ /// can be read as "the second of three shards".
pub fn shard_info(&self) -> Option<[u8; 2]> {
self.shard_info
}
@@ -188,6 +201,9 @@ impl Shard {
/// converted to [`Invisible`].
///
/// Other presence settings are maintained.
+ ///
+ /// [`Invisible`]: ../../model/enum.OnlineStatus.html#variant.Invisible
+ /// [`Offline`]: ../../model/enum.OnlineStatus.html#variant.Offline
pub fn set_status(&mut self, online_status: OnlineStatus) {
self.current_presence.1 = match online_status {
OnlineStatus::Offline => OnlineStatus::Invisible,
@@ -232,6 +248,17 @@ impl Shard {
self.update_presence();
}
+ /// Handles an event from the gateway over the receiver, requiring the
+ /// receiver to be passed if a reconnect needs to occur.
+ ///
+ /// The best case scenario is that one of two values is returned:
+ ///
+ /// - `Ok(None)`: a heartbeat, late hello, or session invalidation was
+ /// received;
+ /// - `Ok(Some((event, None)))`: an op0 dispatch was received, and the
+ /// shard's voice state will be updated, _if_ the `voice` feature is
+ /// enabled.
+ #[doc(hidden)]
pub fn handle_event(&mut self,
event: Result<GatewayEvent>,
mut receiver: &mut Receiver<WebSocketStream>)
@@ -318,6 +345,9 @@ impl Shard {
}
}
+ /// Shuts down the receiver by attempting to cleanly close the
+ /// connection.
+ #[doc(hidden)]
pub fn shutdown(&mut self, receiver: &mut Receiver<WebSocketStream>)
-> Result<()> {
let stream = receiver.get_mut().get_mut();
@@ -335,6 +365,9 @@ impl Shard {
Ok(())
}
+ /// Syncs a number of [`Call`]s, given by their associated channel Ids. This
+ /// will allow the current user to know what calls are currently occurring,
+ /// as otherwise events will not be received.
pub fn sync_calls(&self, channels: &[ChannelId]) {
for &channel in channels {
let msg = ObjectBuilder::new()
@@ -348,6 +381,18 @@ impl Shard {
}
}
+ /// Requests that one or multiple [`Guild`]s be synced.
+ ///
+ /// This will ask Discord to start sending member chunks for large guilds
+ /// (250 members+). If a guild is over 250 members, then a full member list
+ /// will not be downloaded, and must instead be requested to be sent in
+ /// "chunks" containing members.
+ ///
+ /// Member chunks are sent as the [`Event::GuildMembersChunk`] event. Each
+ /// chunk only contains a partial amount of the total members.
+ ///
+ /// If the `cache` feature is enabled, the cache will automatically be
+ /// updated with member chunks.
pub fn sync_guilds(&self, guild_ids: &[GuildId]) {
let msg = ObjectBuilder::new()
.insert("op", OpCode::SyncGuild.num())
@@ -383,7 +428,8 @@ impl Shard {
}}
}
- fn reconnect(&mut self, mut receiver: &mut Receiver<WebSocketStream>) -> Result<(Event, Receiver<WebSocketStream>)> {
+ fn reconnect(&mut self, mut receiver: &mut Receiver<WebSocketStream>)
+ -> Result<(Event, Receiver<WebSocketStream>)> {
debug!("Reconnecting");
// Take a few attempts at reconnecting; otherwise fall back to
diff --git a/src/client/mod.rs b/src/client/mod.rs
index 274d438..21b74d4 100644
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -76,24 +76,37 @@ use ::model::event::{
#[cfg(feature = "cache")]
lazy_static! {
- /// The CACHE is a mutable lazily-initialized static binding. It can be
- /// accessed across any function and in any context.
+ /// A mutable and lazily-initialized static binding. It can be accessed
+ /// across any function and in any context.
///
/// This [`Cache`] instance is updated for every event received, so you do
/// not need to maintain your own cache.
///
/// See the [cache module documentation] for more details.
///
+ /// The Cache itself is wrapped within an `RwLock`, which allows for
+ /// multiple readers or at most one writer at a time across threads. This
+ /// means that you may have multiple commands reading from the Cache
+ /// concurrently.
+ ///
/// # Examples
///
- /// Retrieve the [current user][`CurrentUser`]'s Id:
+ /// Retrieve the [current user][`CurrentUser`]'s Id, by opening a Read
+ /// guard:
///
/// ```rust,ignore
/// use serenity::client::CACHE;
///
- /// println!("{}", CACHE.lock().unwrap().user.id);
+ /// println!("{}", CACHE.read().unwrap().user.id);
/// ```
///
+/// Calling `unwrap()` will block the thread managing an event dispatch
+/// until the guard can be opened.
+ ///
+ /// If you do not want to block the current thread, you may instead use
+ /// `RwLock::try_read`. Refer to `RwLock`'s documentation in the stdlib for
+ /// more information.
+ ///
/// [`CurrentUser`]: ../model/struct.CurrentUser.html
/// [`Cache`]: ../ext/cache/struct.Cache.html
/// [cache module documentation]: ../ext/cache/index.html