Update derive_more requirement from 0.99 to 2.0 by cc90202 · Pull Request #289 · rabbitmq/rabbitmq-stream-rust-client · GitHub
[go: up one dir, main page]

Skip to content

Update derive_more requirement from 0.99 to 2.0 #289

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion protocol/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ ordered-float = "4.1.0"
uuid = "1"
chrono = "0.4.26"
num_enum = "0.7.0"
derive_more = "0.99"
derive_more = { version = "2.0.1", features = ["full"] }

[dev-dependencies]
pretty_assertions = "1.2.0"
Expand Down
6 changes: 3 additions & 3 deletions protocol/src/message/amqp/types/primitives/value.rs
Original file line number Diff line number Diff line change
Expand Up @@ -180,9 +180,9 @@
fn try_from(value: &'a Value) -> Result<Self, Self::Error> {
use std::convert::TryInto;
match value {
Value::Simple(simple) => simple
.try_into()
.map_err(|err: &str| DecodeError::MessageParse(err.to_string())),
Value::Simple(simple) => simple.try_into().map_err(|_| {
DecodeError::MessageParse("Failed to cast Value to simple type".to_string())
}),

Check warning on line 185 in protocol/src/message/amqp/types/primitives/value.rs

View check run for this annotation

Codecov / codecov/patch

protocol/src/message/amqp/types/primitives/value.rs#L183-L185

Added lines #L183 - L185 were not covered by tests
_ => Err(DecodeError::MessageParse(
"Failed to cast Value to simple type".to_string(),
)),
Expand Down
2 changes: 1 addition & 1 deletion src/client/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ mod task;
#[derive(Debug)]
pub enum GenericTcpStream {
Tcp(#[pin] TcpStream),
SecureTcp(#[pin] TlsStream<TcpStream>),
SecureTcp(#[pin] Box<TlsStream<TcpStream>>),
}

impl AsyncRead for GenericTcpStream {
Expand Down
2 changes: 1 addition & 1 deletion src/client/options.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ impl ClientOptions {
let domain = ServerName::try_from(host.clone()).unwrap();
let connector = TlsConnector::from(Arc::new(config));
let conn = connector.connect(domain, stream).await?;
Ok(GenericTcpStream::SecureTcp(conn))
Ok(GenericTcpStream::SecureTcp(Box::new(conn)))
}
match &self.tls {
TlsConfiguration::Trusted {
Expand Down
179 changes: 80 additions & 99 deletions src/producer.rs
Original file line number Diff line number Diff line change
@@ -1,19 +1,20 @@
use futures::executor::block_on;
use std::future::Future;
use std::time::Duration;
use std::{
marker::PhantomData,
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
atomic::{AtomicBool, AtomicU64, Ordering},
Arc,
},
};

use dashmap::DashMap;
use futures::{future::BoxFuture, FutureExt};
use tokio::sync::mpsc;
use tokio::sync::mpsc::channel;
use tokio::sync::{mpsc, Mutex};
use tokio::time::sleep;
use tracing::{debug, error, trace};
use tracing::{error, info, trace};

use rabbitmq_stream_protocol::{message::Message, ResponseCode, ResponseKind};

Expand Down Expand Up @@ -60,18 +61,49 @@
}

pub struct ProducerInternal {
client: Client,
client: Arc<Client>,
stream: String,
producer_id: u8,
batch_size: usize,
publish_sequence: Arc<AtomicU64>,
waiting_confirmations: WaiterMap,
closed: Arc<AtomicBool>,
accumulator: MessageAccumulator,
publish_version: u16,
sender: mpsc::Sender<ClientMessage>,
filter_value_extractor: Option<FilterValueExtractor>,
}

impl Drop for ProducerInternal {
fn drop(&mut self) {
block_on(async {
if let Err(e) = self.close().await {
error!(error = ?e, "Error closing producer");

Check warning on line 78 in src/producer.rs

View check run for this annotation

Codecov / codecov/patch

src/producer.rs#L78

Added line #L78 was not covered by tests
}
});
}
}

impl ProducerInternal {
pub async fn close(&self) -> Result<(), ProducerCloseError> {
match self
.closed
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
{
Ok(false) => {
let response = self.client.delete_publisher(self.producer_id).await?;
if response.is_ok() {
self.client.close().await?;
Ok(())
} else {
Err(ProducerCloseError::Close {
status: response.code().clone(),
stream: self.stream.clone(),
})

Check warning on line 99 in src/producer.rs

View check run for this annotation

Codecov / codecov/patch

src/producer.rs#L96-L99

Added lines #L96 - L99 were not covered by tests
}
}
_ => Ok(()), // Already closed
}
}
}

/// API for publishing messages to RabbitMQ stream
#[derive(Clone)]
pub struct Producer<T>(Arc<ProducerInternal>, PhantomData<T>);
Expand Down Expand Up @@ -139,22 +171,29 @@
};

if response.is_ok() {
let (sender, receiver) = mpsc::channel(self.batch_size);

let client = Arc::new(client);
let producer = ProducerInternal {
producer_id,
batch_size: self.batch_size,
stream: stream.to_string(),
client,
publish_sequence,
waiting_confirmations,
publish_version,
closed: Arc::new(AtomicBool::new(false)),
accumulator: MessageAccumulator::new(self.batch_size),
sender,
filter_value_extractor: self.filter_value_extractor,
};

let internal_producer = Arc::new(producer);
let producer = Producer(internal_producer.clone(), PhantomData);
schedule_batch_send(internal_producer);
schedule_batch_send(
self.batch_size,
receiver,
internal_producer.client.clone(),
producer_id,
publish_version,
);
let producer = Producer(internal_producer, PhantomData);

Ok(producer)
} else {
Expand Down Expand Up @@ -205,78 +244,33 @@
}
}

pub struct MessageAccumulator {
sender: mpsc::Sender<ClientMessage>,
receiver: Mutex<mpsc::Receiver<ClientMessage>>,
message_count: AtomicUsize,
}

impl MessageAccumulator {
pub fn new(batch_size: usize) -> Self {
let (sender, receiver) = mpsc::channel(batch_size);
Self {
sender,
receiver: Mutex::new(receiver),
message_count: AtomicUsize::new(0),
}
}

pub async fn add(&self, message: ClientMessage) -> RabbitMQStreamResult<()> {
match self.sender.send(message).await {
Ok(_) => {
self.message_count.fetch_add(1, Ordering::Relaxed);
Ok(())
}
Err(e) => Err(ClientError::GenericError(Box::new(e))),
}
}

pub async fn get(&self, buffer: &mut Vec<ClientMessage>, batch_size: usize) -> (bool, usize) {
let mut receiver = self.receiver.lock().await;

let count = receiver.recv_many(buffer, batch_size).await;
self.message_count.fetch_sub(count, Ordering::Relaxed);

// `recv_many` returns 0 only if the channel is closed
// Read https://docs.rs/tokio/latest/tokio/sync/mpsc/struct.Receiver.html#method.recv_many
(count == 0, count)
}
}

fn schedule_batch_send(producer: Arc<ProducerInternal>) {
fn schedule_batch_send(
batch_size: usize,
mut receiver: mpsc::Receiver<ClientMessage>,
client: Arc<Client>,
producer_id: u8,
publish_version: u16,
) {
tokio::task::spawn(async move {
let mut buffer = Vec::with_capacity(producer.batch_size);
let mut buffer = Vec::with_capacity(batch_size);
loop {
let (is_closed, count) = producer
.accumulator
.get(&mut buffer, producer.batch_size)
.await;
let count = receiver.recv_many(&mut buffer, batch_size).await;

if is_closed {
error!("Channel is closed and this is bad");
if count == 0 || buffer.is_empty() {
// Channel is closed, exit the loop
break;
}

if count > 0 {
debug!("Sending batch of {} messages", count);
let messages: Vec<_> = buffer.drain(..count).collect();
match producer
.client
.publish(producer.producer_id, messages, producer.publish_version)
.await
{
Ok(_) => {}
Err(e) => {
error!("Error publishing batch {:?}", e);

// Stop loop if producer is closed
if producer.closed.load(Ordering::Relaxed) {
break;
}
}
};
}
let messages: Vec<_> = buffer.drain(..count).collect();
match client.publish(producer_id, messages, publish_version).await {
Ok(_) => {}
Err(e) => {
error!("Error publishing batch {:?}", e);

Check warning on line 268 in src/producer.rs

View check run for this annotation

Codecov / codecov/patch

src/producer.rs#L267-L268

Added lines #L267 - L268 were not covered by tests
}
};
}

info!("Batch send task finished");
});
}

Expand Down Expand Up @@ -455,10 +449,13 @@
.waiting_confirmations
.insert(publishing_id, ProducerMessageWaiter::Once(waiter));

self.0.accumulator.add(msg).await?;
if let Err(e) = self.0.sender.send(msg).await {
return Err(ClientError::GenericError(Box::new(e)))?;

Check warning on line 453 in src/producer.rs

View check run for this annotation

Codecov / codecov/patch

src/producer.rs#L453

Added line #L453 was not covered by tests
}

Ok(())
}

async fn internal_batch_send<Fut>(
&self,
messages: Vec<Message>,
Expand Down Expand Up @@ -488,7 +485,9 @@
}

// Queue the message for sending
self.0.accumulator.add(client_message).await?;
if let Err(e) = self.0.sender.send(client_message).await {
return Err(ClientError::GenericError(Box::new(e)))?;

Check warning on line 489 in src/producer.rs

View check run for this annotation

Codecov / codecov/patch

src/producer.rs#L489

Added line #L489 was not covered by tests
}
self.0
.waiting_confirmations
.insert(publishing_id, ProducerMessageWaiter::Shared(waiter.clone()));
Expand All @@ -500,27 +499,9 @@
pub fn is_closed(&self) -> bool {
self.0.closed.load(Ordering::Relaxed)
}
// TODO handle producer state after close

pub async fn close(self) -> Result<(), ProducerCloseError> {
match self
.0
.closed
.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
{
Ok(false) => {
let response = self.0.client.delete_publisher(self.0.producer_id).await?;
if response.is_ok() {
self.0.client.close().await?;
Ok(())
} else {
Err(ProducerCloseError::Close {
status: response.code().clone(),
stream: self.0.stream.clone(),
})
}
}
_ => Err(ProducerCloseError::AlreadyClosed),
}
self.0.close().await
}
}

Expand Down
7 changes: 7 additions & 0 deletions tests/client_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -462,6 +462,13 @@ async fn client_test_route_test() {
async fn client_close() {
let test = TestClient::create().await;

let output = test
.client
.metadata(vec![test.stream.clone()])
.await
.unwrap();
assert_ne!(output.len(), 0);

test.client
.close()
.await
Expand Down
6 changes: 0 additions & 6 deletions tests/consumer_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -221,12 +221,6 @@ async fn consumer_close_test() {
consumer.handle().close().await,
Err(ConsumerCloseError::AlreadyClosed),
));
producer.clone().close().await.unwrap();

assert!(matches!(
producer.close().await,
Err(ProducerCloseError::AlreadyClosed),
));
}

#[tokio::test(flavor = "multi_thread")]
Expand Down
Loading
Loading