polars_core/series/implementations/mod.rs

#![allow(unsafe_op_in_unsafe_fn)]
#[cfg(feature = "dtype-array")]
mod array;
mod binary;
mod binary_offset;
mod boolean;
#[cfg(feature = "dtype-categorical")]
mod categorical;
#[cfg(feature = "dtype-date")]
mod date;
#[cfg(feature = "dtype-datetime")]
mod datetime;
#[cfg(feature = "dtype-decimal")]
mod decimal;
#[cfg(feature = "dtype-duration")]
mod duration;
mod floats;
mod list;
pub(crate) mod null;
#[cfg(feature = "object")]
mod object;
mod string;
#[cfg(feature = "dtype-struct")]
mod struct_;
#[cfg(feature = "dtype-time")]
mod time;

use std::any::Any;
use std::borrow::Cow;

use polars_compute::rolling::QuantileMethod;
use polars_utils::aliases::PlSeedableRandomStateQuality;

use super::*;
use crate::chunked_array::AsSinglePtr;
use crate::chunked_array::comparison::*;
use crate::chunked_array::ops::compare_inner::{
    IntoTotalEqInner, IntoTotalOrdInner, TotalEqInner, TotalOrdInner,
};

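/// Newtype wrapper around a concrete `ChunkedArray<T>`. The per-dtype trait
/// impls generated below expose the wrapped array through the object-safe
/// `SeriesTrait`, which is what a `Series` dispatches to at runtime.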
pub(crate) struct SeriesWrap<T>(pub T);

impl<T: PolarsDataType> From<ChunkedArray<T>> for SeriesWrap<ChunkedArray<T>> {
    fn from(ca: ChunkedArray<T>) -> Self {
        SeriesWrap(ca)
    }
}

impl<T: PolarsDataType> Deref for SeriesWrap<ChunkedArray<T>> {
    type Target = ChunkedArray<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

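// Blanket conversion: any chunked array with a physical dtype can be turned
// into a `Series`; the dtype-specific wrapping is delegated to
// `PolarsPhysicalType::ca_into_series`. A rough usage sketch (constructor
// names/signatures approximate, not taken from this file):
//
//     let ca = Int32Chunked::from_slice("a".into(), &[1, 2, 3]);
//     let s: Series = ca.into_series();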
unsafe impl<T: PolarsPhysicalType> IntoSeries for ChunkedArray<T> {
    fn into_series(self) -> Series {
        T::ca_into_series(self)
    }
}

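// `impl_dyn_series!` generates the `PrivateSeries` and `SeriesTrait` impls for
// `SeriesWrap<$ca>`. Every method forwards to the inner chunked array
// (`self.0`) and converts the result back into a `Series` where needed; `$pdt`
// is the matching `PolarsDataType`, used where a static dtype is required
// (e.g. in the bitwise reductions).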
macro_rules! impl_dyn_series {
    ($ca: ident, $pdt:ty) => {
        impl private::PrivateSeries for SeriesWrap<$ca> {
            fn compute_len(&mut self) {
                self.0.compute_len()
            }

            fn _field(&self) -> Cow<'_, Field> {
                Cow::Borrowed(self.0.ref_field())
            }

            fn _dtype(&self) -> &DataType {
                self.0.ref_field().dtype()
            }

            fn _get_flags(&self) -> StatisticsFlags {
                self.0.get_flags()
            }

            fn _set_flags(&mut self, flags: StatisticsFlags) {
                self.0.set_flags(flags)
            }

            unsafe fn equal_element(
                &self,
                idx_self: usize,
                idx_other: usize,
                other: &Series,
            ) -> bool {
                self.0.equal_element(idx_self, idx_other, other)
            }

            #[cfg(feature = "zip_with")]
            fn zip_with_same_type(
                &self,
                mask: &BooleanChunked,
                other: &Series,
            ) -> PolarsResult<Series> {
                ChunkZip::zip_with(&self.0, mask, other.as_ref().as_ref())
                    .map(|ca| ca.into_series())
            }
            fn into_total_eq_inner<'a>(&'a self) -> Box<dyn TotalEqInner + 'a> {
                (&self.0).into_total_eq_inner()
            }
            fn into_total_ord_inner<'a>(&'a self) -> Box<dyn TotalOrdInner + 'a> {
                (&self.0).into_total_ord_inner()
            }

            fn vec_hash(
                &self,
                random_state: PlSeedableRandomStateQuality,
                buf: &mut Vec<u64>,
            ) -> PolarsResult<()> {
                self.0.vec_hash(random_state, buf)?;
                Ok(())
            }

            fn vec_hash_combine(
                &self,
                build_hasher: PlSeedableRandomStateQuality,
                hashes: &mut [u64],
            ) -> PolarsResult<()> {
                self.0.vec_hash_combine(build_hasher, hashes)?;
                Ok(())
            }

            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_min(&self, groups: &GroupsType) -> Series {
                self.0.agg_min(groups)
            }

            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_max(&self, groups: &GroupsType) -> Series {
                self.0.agg_max(groups)
            }

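            // Small integer dtypes are widened to Int64 before the grouped sum
            // so the aggregation does not overflow the narrow physical type.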
            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_sum(&self, groups: &GroupsType) -> Series {
                use DataType::*;
                match self.dtype() {
                    Int8 | UInt8 | Int16 | UInt16 => self
                        .cast(&Int64, CastOptions::Overflowing)
                        .unwrap()
                        .agg_sum(groups),
                    _ => self.0.agg_sum(groups),
                }
            }

            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_std(&self, groups: &GroupsType, ddof: u8) -> Series {
                self.0.agg_std(groups, ddof)
            }

            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_var(&self, groups: &GroupsType, ddof: u8) -> Series {
                self.0.agg_var(groups, ddof)
            }

            #[cfg(feature = "algorithm_group_by")]
            unsafe fn agg_list(&self, groups: &GroupsType) -> Series {
                self.0.agg_list(groups)
            }

            #[cfg(feature = "bitwise")]
            unsafe fn agg_and(&self, groups: &GroupsType) -> Series {
                self.0.agg_and(groups)
            }
            #[cfg(feature = "bitwise")]
            unsafe fn agg_or(&self, groups: &GroupsType) -> Series {
                self.0.agg_or(groups)
            }
            #[cfg(feature = "bitwise")]
            unsafe fn agg_xor(&self, groups: &GroupsType) -> Series {
                self.0.agg_xor(groups)
            }

            fn subtract(&self, rhs: &Series) -> PolarsResult<Series> {
                NumOpsDispatch::subtract(&self.0, rhs)
            }
            fn add_to(&self, rhs: &Series) -> PolarsResult<Series> {
                NumOpsDispatch::add_to(&self.0, rhs)
            }
            fn multiply(&self, rhs: &Series) -> PolarsResult<Series> {
                NumOpsDispatch::multiply(&self.0, rhs)
            }
            fn divide(&self, rhs: &Series) -> PolarsResult<Series> {
                NumOpsDispatch::divide(&self.0, rhs)
            }
            fn remainder(&self, rhs: &Series) -> PolarsResult<Series> {
                NumOpsDispatch::remainder(&self.0, rhs)
            }
            #[cfg(feature = "algorithm_group_by")]
            fn group_tuples(&self, multithreaded: bool, sorted: bool) -> PolarsResult<GroupsType> {
                IntoGroupsType::group_tuples(&self.0, multithreaded, sorted)
            }

            fn arg_sort_multiple(
                &self,
                by: &[Column],
                options: &SortMultipleOptions,
            ) -> PolarsResult<IdxCa> {
                self.0.arg_sort_multiple(by, options)
            }
        }

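        // The public `SeriesTrait` surface: again pure delegation to the inner
        // chunked array, re-wrapping results in a `Series` where the trait
        // requires it.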
        impl SeriesTrait for SeriesWrap<$ca> {
            #[cfg(feature = "rolling_window")]
            fn rolling_map(
                &self,
                _f: &dyn Fn(&Series) -> PolarsResult<Series>,
                _options: RollingOptionsFixedWindow,
            ) -> PolarsResult<Series> {
                ChunkRollApply::rolling_map(&self.0, _f, _options).map(|ca| ca.into_series())
            }

            fn rename(&mut self, name: PlSmallStr) {
                self.0.rename(name);
            }

            fn chunk_lengths(&self) -> ChunkLenIter<'_> {
                self.0.chunk_lengths()
            }
            fn name(&self) -> &PlSmallStr {
                self.0.name()
            }

            fn chunks(&self) -> &Vec<ArrayRef> {
                self.0.chunks()
            }
            unsafe fn chunks_mut(&mut self) -> &mut Vec<ArrayRef> {
                self.0.chunks_mut()
            }
            fn shrink_to_fit(&mut self) {
                self.0.shrink_to_fit()
            }

            fn slice(&self, offset: i64, length: usize) -> Series {
                self.0.slice(offset, length).into_series()
            }

            fn split_at(&self, offset: i64) -> (Series, Series) {
                let (a, b) = self.0.split_at(offset);
                (a.into_series(), b.into_series())
            }

            fn append(&mut self, other: &Series) -> PolarsResult<()> {
                polars_ensure!(self.0.dtype() == other.dtype(), append);
                self.0.append(other.as_ref().as_ref())?;
                Ok(())
            }
            fn append_owned(&mut self, other: Series) -> PolarsResult<()> {
                polars_ensure!(self.0.dtype() == other.dtype(), append);
                self.0.append_owned(other.take_inner())
            }

            fn extend(&mut self, other: &Series) -> PolarsResult<()> {
                polars_ensure!(self.0.dtype() == other.dtype(), extend);
                self.0.extend(other.as_ref().as_ref())?;
                Ok(())
            }

            fn filter(&self, filter: &BooleanChunked) -> PolarsResult<Series> {
                ChunkFilter::filter(&self.0, filter).map(|ca| ca.into_series())
            }

            fn _sum_as_f64(&self) -> f64 {
                self.0._sum_as_f64()
            }

            fn mean(&self) -> Option<f64> {
                self.0.mean()
            }

            fn median(&self) -> Option<f64> {
                self.0.median()
            }

            fn std(&self, ddof: u8) -> Option<f64> {
                self.0.std(ddof)
            }

            fn var(&self, ddof: u8) -> Option<f64> {
                self.0.var(ddof)
            }

            fn take(&self, indices: &IdxCa) -> PolarsResult<Series> {
                Ok(self.0.take(indices)?.into_series())
            }

            unsafe fn take_unchecked(&self, indices: &IdxCa) -> Series {
                self.0.take_unchecked(indices).into_series()
            }

            fn take_slice(&self, indices: &[IdxSize]) -> PolarsResult<Series> {
                Ok(self.0.take(indices)?.into_series())
            }

            unsafe fn take_slice_unchecked(&self, indices: &[IdxSize]) -> Series {
                self.0.take_unchecked(indices).into_series()
            }

            fn len(&self) -> usize {
                self.0.len()
            }

            fn rechunk(&self) -> Series {
                self.0.rechunk().into_owned().into_series()
            }

            fn new_from_index(&self, index: usize, length: usize) -> Series {
                ChunkExpandAtIndex::new_from_index(&self.0, index, length).into_series()
            }

            fn cast(&self, dtype: &DataType, options: CastOptions) -> PolarsResult<Series> {
                self.0.cast_with_options(dtype, options)
            }

            #[inline]
            unsafe fn get_unchecked(&self, index: usize) -> AnyValue<'_> {
                self.0.get_any_value_unchecked(index)
            }

            fn sort_with(&self, options: SortOptions) -> PolarsResult<Series> {
                Ok(ChunkSort::sort_with(&self.0, options).into_series())
            }

            fn arg_sort(&self, options: SortOptions) -> IdxCa {
                ChunkSort::arg_sort(&self.0, options)
            }

            fn null_count(&self) -> usize {
                self.0.null_count()
            }

            fn has_nulls(&self) -> bool {
                self.0.has_nulls()
            }

            #[cfg(feature = "algorithm_group_by")]
            fn unique(&self) -> PolarsResult<Series> {
                ChunkUnique::unique(&self.0).map(|ca| ca.into_series())
            }

            #[cfg(feature = "algorithm_group_by")]
            fn n_unique(&self) -> PolarsResult<usize> {
                ChunkUnique::n_unique(&self.0)
            }

            #[cfg(feature = "algorithm_group_by")]
            fn arg_unique(&self) -> PolarsResult<IdxCa> {
                ChunkUnique::arg_unique(&self.0)
            }

            fn is_null(&self) -> BooleanChunked {
                self.0.is_null()
            }

            fn is_not_null(&self) -> BooleanChunked {
                self.0.is_not_null()
            }

            fn reverse(&self) -> Series {
                ChunkReverse::reverse(&self.0).into_series()
            }

            fn as_single_ptr(&mut self) -> PolarsResult<usize> {
                self.0.as_single_ptr()
            }

            fn shift(&self, periods: i64) -> Series {
                ChunkShift::shift(&self.0, periods).into_series()
            }

            fn sum_reduce(&self) -> PolarsResult<Scalar> {
                Ok(ChunkAggSeries::sum_reduce(&self.0))
            }
            fn max_reduce(&self) -> PolarsResult<Scalar> {
                Ok(ChunkAggSeries::max_reduce(&self.0))
            }
            fn min_reduce(&self) -> PolarsResult<Scalar> {
                Ok(ChunkAggSeries::min_reduce(&self.0))
            }
            fn median_reduce(&self) -> PolarsResult<Scalar> {
                Ok(QuantileAggSeries::median_reduce(&self.0))
            }
            fn var_reduce(&self, ddof: u8) -> PolarsResult<Scalar> {
                Ok(VarAggSeries::var_reduce(&self.0, ddof))
            }
            fn std_reduce(&self, ddof: u8) -> PolarsResult<Scalar> {
                Ok(VarAggSeries::std_reduce(&self.0, ddof))
            }
            fn quantile_reduce(
                &self,
                quantile: f64,
                method: QuantileMethod,
            ) -> PolarsResult<Scalar> {
                QuantileAggSeries::quantile_reduce(&self.0, quantile, method)
            }

            #[cfg(feature = "bitwise")]
            fn and_reduce(&self) -> PolarsResult<Scalar> {
                let dt = <$pdt as PolarsDataType>::get_static_dtype();
                let av = self.0.and_reduce().map_or(AnyValue::Null, Into::into);

                Ok(Scalar::new(dt, av))
            }

            #[cfg(feature = "bitwise")]
            fn or_reduce(&self) -> PolarsResult<Scalar> {
                let dt = <$pdt as PolarsDataType>::get_static_dtype();
                let av = self.0.or_reduce().map_or(AnyValue::Null, Into::into);

                Ok(Scalar::new(dt, av))
            }

            #[cfg(feature = "bitwise")]
            fn xor_reduce(&self) -> PolarsResult<Scalar> {
                let dt = <$pdt as PolarsDataType>::get_static_dtype();
                let av = self.0.xor_reduce().map_or(AnyValue::Null, Into::into);

                Ok(Scalar::new(dt, av))
            }

            #[cfg(feature = "approx_unique")]
            fn approx_n_unique(&self) -> PolarsResult<IdxSize> {
                Ok(ChunkApproxNUnique::approx_n_unique(&self.0))
            }

            fn clone_inner(&self) -> Arc<dyn SeriesTrait> {
                Arc::new(SeriesWrap(Clone::clone(&self.0)))
            }

            fn find_validity_mismatch(&self, other: &Series, idxs: &mut Vec<IdxSize>) {
                self.0.find_validity_mismatch(other, idxs)
            }

            #[cfg(feature = "checked_arithmetic")]
            fn checked_div(&self, rhs: &Series) -> PolarsResult<Series> {
                self.0.checked_div(rhs)
            }

            fn as_any(&self) -> &dyn Any {
                &self.0
            }

            fn as_any_mut(&mut self) -> &mut dyn Any {
                &mut self.0
            }

            fn as_phys_any(&self) -> &dyn Any {
                &self.0
            }

            fn as_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
                self as _
            }
        }
    };
}

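// Instantiate the impls for every integer physical type; the non-default
// widths are gated behind their dtype features. The remaining dtypes
// (floats, strings, temporal types, etc.) are covered by the sibling modules
// declared at the top of this file.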
#[cfg(feature = "dtype-u8")]
impl_dyn_series!(UInt8Chunked, UInt8Type);
#[cfg(feature = "dtype-u16")]
impl_dyn_series!(UInt16Chunked, UInt16Type);
impl_dyn_series!(UInt32Chunked, UInt32Type);
impl_dyn_series!(UInt64Chunked, UInt64Type);
#[cfg(feature = "dtype-i8")]
impl_dyn_series!(Int8Chunked, Int8Type);
#[cfg(feature = "dtype-i16")]
impl_dyn_series!(Int16Chunked, Int16Type);
impl_dyn_series!(Int32Chunked, Int32Type);
impl_dyn_series!(Int64Chunked, Int64Type);
#[cfg(feature = "dtype-i128")]
impl_dyn_series!(Int128Chunked, Int128Type);

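// Numeric chunked arrays expose their underlying bit pattern for operations
// that want to work on the raw physical representation; the non-numeric
// wrappers below opt out by returning `None`.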
impl<T: PolarsNumericType> private::PrivateSeriesNumeric for SeriesWrap<ChunkedArray<T>> {
    fn bit_repr(&self) -> Option<BitRepr> {
        Some(self.0.to_bit_repr())
    }
}

impl private::PrivateSeriesNumeric for SeriesWrap<StringChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        None
    }
}
impl private::PrivateSeriesNumeric for SeriesWrap<BinaryChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        None
    }
}
impl private::PrivateSeriesNumeric for SeriesWrap<BinaryOffsetChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        None
    }
}
impl private::PrivateSeriesNumeric for SeriesWrap<ListChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        None
    }
}
#[cfg(feature = "dtype-array")]
impl private::PrivateSeriesNumeric for SeriesWrap<ArrayChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        None
    }
}
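// Booleans have no fixed-width physical representation of their own, so they
// are widened to `UInt32` (0/1 values) to provide a bit representation.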
impl private::PrivateSeriesNumeric for SeriesWrap<BooleanChunked> {
    fn bit_repr(&self) -> Option<BitRepr> {
        let repr = self
            .0
            .cast_with_options(&DataType::UInt32, CastOptions::NonStrict)
            .unwrap()
            .u32()
            .unwrap()
            .clone();

        Some(BitRepr::U32(repr))
    }
}