Struct polars::frame::DataFrame

source ·
pub struct DataFrame { /* private fields */ }
Expand description

A contiguous growable collection of Series that have the same length.

§Use declarations

All the common tools can be found in crate::prelude (or in polars::prelude).

use polars_core::prelude::*; // if the crate polars-core is used directly
// use polars::prelude::*;      if the crate polars is used

§Initialization

§Default

A DataFrame can be initialized empty:

let df = DataFrame::default();
assert!(df.is_empty());

§Wrapping a Vec<Series>

A DataFrame is built upon a Vec<Series> where the Series have the same length.

let s1 = Series::new("Fruit", &["Apple", "Apple", "Pear"]);
let s2 = Series::new("Color", &["Red", "Yellow", "Green"]);

let df: PolarsResult<DataFrame> = DataFrame::new(vec![s1, s2]);

§Using a macro

The df! macro is a convenient method:

let df: PolarsResult<DataFrame> = df!("Fruit" => &["Apple", "Apple", "Pear"],
                                      "Color" => &["Red", "Yellow", "Green"]);

§Using a CSV file

See the polars_io::csv::CsvReader.

§Indexing

§By a number

The Index<usize> is implemented for the DataFrame.

let df = df!("Fruit" => &["Apple", "Apple", "Pear"],
             "Color" => &["Red", "Yellow", "Green"])?;

assert_eq!(df[0], Series::new("Fruit", &["Apple", "Apple", "Pear"]));
assert_eq!(df[1], Series::new("Color", &["Red", "Yellow", "Green"]));

§By a Series name

let df = df!("Fruit" => &["Apple", "Apple", "Pear"],
             "Color" => &["Red", "Yellow", "Green"])?;

assert_eq!(df["Fruit"], Series::new("Fruit", &["Apple", "Apple", "Pear"]));
assert_eq!(df["Color"], Series::new("Color", &["Red", "Yellow", "Green"]));

Implementations§

source§

impl DataFrame

source

pub fn to_ndarray<N>( &self, ordering: IndexOrder ) -> Result<ArrayBase<OwnedRepr<<N as PolarsNumericType>::Native>, Dim<[usize; 2]>>, PolarsError>

Create a 2D ndarray::Array from this DataFrame. This requires all columns in the DataFrame to be non-null and numeric. They will be cast to the same data type (if they aren’t already).

For floating point data we implicitly convert None to NaN without failure.

use polars_core::prelude::*;
let a = UInt32Chunked::new("a", &[1, 2, 3]).into_series();
let b = Float64Chunked::new("b", &[10., 8., 6.]).into_series();

let df = DataFrame::new(vec![a, b]).unwrap();
let ndarray = df.to_ndarray::<Float64Type>(IndexOrder::Fortran).unwrap();
println!("{:?}", ndarray);

Outputs:

[[1.0, 10.0],
 [2.0, 8.0],
 [3.0, 6.0]], shape=[3, 2], strides=[1, 3], layout=Ff (0xa), const ndim=2
source§

impl DataFrame

source

pub fn into_struct(self, name: &str) -> StructChunked

source§

impl DataFrame

source

pub fn sample_n( &self, n: &Series, with_replacement: bool, shuffle: bool, seed: Option<u64> ) -> Result<DataFrame, PolarsError>

Sample n datapoints from this DataFrame.

source

pub fn sample_n_literal( &self, n: usize, with_replacement: bool, shuffle: bool, seed: Option<u64> ) -> Result<DataFrame, PolarsError>

source

pub fn sample_frac( &self, frac: &Series, with_replacement: bool, shuffle: bool, seed: Option<u64> ) -> Result<DataFrame, PolarsError>

Sample a fraction between 0.0 and 1.0 of this DataFrame.

source§

impl DataFrame

source

pub fn explode_impl( &self, columns: Vec<Series> ) -> Result<DataFrame, PolarsError>

source

pub fn explode<I, S>(&self, columns: I) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Explode DataFrame to long format by exploding a column with Lists.

§Example
let s0 = Series::new("a", &[1i64, 2, 3]);
let s1 = Series::new("b", &[1i64, 1, 1]);
let s2 = Series::new("c", &[2i64, 2, 2]);
let list = Series::new("foo", &[s0, s1, s2]);

let s0 = Series::new("B", [1, 2, 3]);
let s1 = Series::new("C", [1, 1, 1]);
let df = DataFrame::new(vec![list, s0, s1])?;
let exploded = df.explode(["foo"])?;

println!("{:?}", df);
println!("{:?}", exploded);

Outputs:

 +-------------+-----+-----+
 | foo         | B   | C   |
 | ---         | --- | --- |
 | list [i64]  | i32 | i32 |
 +=============+=====+=====+
 | "[1, 2, 3]" | 1   | 1   |
 +-------------+-----+-----+
 | "[1, 1, 1]" | 2   | 1   |
 +-------------+-----+-----+
 | "[2, 2, 2]" | 3   | 1   |
 +-------------+-----+-----+

 +-----+-----+-----+
 | foo | B   | C   |
 | --- | --- | --- |
 | i64 | i32 | i32 |
 +=====+=====+=====+
 | 1   | 1   | 1   |
 +-----+-----+-----+
 | 2   | 1   | 1   |
 +-----+-----+-----+
 | 3   | 1   | 1   |
 +-----+-----+-----+
 | 1   | 2   | 1   |
 +-----+-----+-----+
 | 1   | 2   | 1   |
 +-----+-----+-----+
 | 1   | 2   | 1   |
 +-----+-----+-----+
 | 2   | 3   | 1   |
 +-----+-----+-----+
 | 2   | 3   | 1   |
 +-----+-----+-----+
 | 2   | 3   | 1   |
 +-----+-----+-----+
source

pub fn melt<I, J>( &self, id_vars: I, value_vars: J ) -> Result<DataFrame, PolarsError>
where I: IntoVec<SmartString<LazyCompact>>, J: IntoVec<SmartString<LazyCompact>>,

Unpivot a DataFrame from wide to long format.

§Example
§Arguments
  • id_vars - String slice that represents the columns to use as id variables.
  • value_vars - String slice that represents the columns to use as value variables.

If value_vars is empty all columns that are not in id_vars will be used.

let df = df!("A" => &["a", "b", "a"],
             "B" => &[1, 3, 5],
             "C" => &[10, 11, 12],
             "D" => &[2, 4, 6]
    )?;

let melted = df.melt(&["A", "B"], &["C", "D"])?;
println!("{:?}", df);
println!("{:?}", melted);

Outputs:

 +-----+-----+-----+-----+
 | A   | B   | C   | D   |
 | --- | --- | --- | --- |
 | str | i32 | i32 | i32 |
 +=====+=====+=====+=====+
 | "a" | 1   | 10  | 2   |
 +-----+-----+-----+-----+
 | "b" | 3   | 11  | 4   |
 +-----+-----+-----+-----+
 | "a" | 5   | 12  | 6   |
 +-----+-----+-----+-----+

 +-----+-----+----------+-------+
 | A   | B   | variable | value |
 | --- | --- | ---      | ---   |
 | str | i32 | str      | i32   |
 +=====+=====+==========+=======+
 | "a" | 1   | "C"      | 10    |
 +-----+-----+----------+-------+
 | "b" | 3   | "C"      | 11    |
 +-----+-----+----------+-------+
 | "a" | 5   | "C"      | 12    |
 +-----+-----+----------+-------+
 | "a" | 1   | "D"      | 2     |
 +-----+-----+----------+-------+
 | "b" | 3   | "D"      | 4     |
 +-----+-----+----------+-------+
 | "a" | 5   | "D"      | 6     |
 +-----+-----+----------+-------+
source

pub fn melt2(&self, args: MeltArgs) -> Result<DataFrame, PolarsError>

Similar to melt, but without generics. This may be easier if you want to pass an empty id_vars or empty value_vars.

source§

impl DataFrame

source

pub fn group_by_with_series( &self, by: Vec<Series>, multithreaded: bool, sorted: bool ) -> Result<GroupBy<'_>, PolarsError>

source

pub fn group_by<I, S>(&self, by: I) -> Result<GroupBy<'_>, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Group DataFrame using a Series column.

§Example
use polars_core::prelude::*;
fn group_by_sum(df: &DataFrame) -> PolarsResult<DataFrame> {
    df.group_by(["column_name"])?
    .select(["agg_column_name"])
    .sum()
}
source

pub fn group_by_stable<I, S>(&self, by: I) -> Result<GroupBy<'_>, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Group DataFrame using a Series column. The groups are ordered by their smallest row index.

source§

impl DataFrame

source

pub fn get_row(&self, idx: usize) -> Result<Row<'_>, PolarsError>

Get a row from a DataFrame. Use of this is discouraged as it will likely be slow.

source

pub fn get_row_amortized<'a>( &'a self, idx: usize, row: &mut Row<'a> ) -> Result<(), PolarsError>

Amortize allocations by reusing a row. The caller is responsible for making sure that the row has at least the capacity for the number of columns in the DataFrame.

source

pub unsafe fn get_row_amortized_unchecked<'a>( &'a self, idx: usize, row: &mut Row<'a> )

Amortize allocations by reusing a row. The caller is responsible for making sure that the row has at least the capacity for the number of columns in the DataFrame.

§Safety

Does not do any bounds checking.

source

pub fn from_rows_and_schema( rows: &[Row<'_>], schema: &Schema ) -> Result<DataFrame, PolarsError>

Create a new DataFrame from rows. This should only be used when you have row wise data, as this is a lot slower than creating the Series in a columnar fashion

source

pub fn from_rows_iter_and_schema<'a, I>( rows: I, schema: &Schema ) -> Result<DataFrame, PolarsError>
where I: Iterator<Item = &'a Row<'a>>,

Create a new DataFrame from an iterator over rows. This should only be used when you have row wise data, as this is a lot slower than creating the Series in a columnar fashion

source

pub fn try_from_rows_iter_and_schema<'a, I>( rows: I, schema: &Schema ) -> Result<DataFrame, PolarsError>
where I: Iterator<Item = Result<&'a Row<'a>, PolarsError>>,

Create a new DataFrame from an iterator over rows. This should only be used when you have row wise data, as this is a lot slower than creating the Series in a columnar fashion

source

pub fn from_rows(rows: &[Row<'_>]) -> Result<DataFrame, PolarsError>

Create a new DataFrame from rows. This should only be used when you have row wise data, as this is a lot slower than creating the Series in a columnar fashion

source§

impl DataFrame

source

pub fn transpose( &mut self, keep_names_as: Option<&str>, new_col_names: Option<Either<String, Vec<String>>> ) -> Result<DataFrame, PolarsError>

Transpose a DataFrame. This is a very expensive operation.

source§

impl DataFrame

source

pub fn top_k( &self, k: usize, descending: impl IntoVec<bool>, by_column: impl IntoVec<SmartString<LazyCompact>> ) -> Result<DataFrame, PolarsError>

source§

impl DataFrame

source

pub fn estimated_size(&self) -> usize

Returns an estimation of the total (heap) allocated size of the DataFrame in bytes.

§Implementation

This estimation is the sum of the size of its buffers, validity, including nested arrays. Multiple arrays may share buffers and bitmaps. Therefore, the size of 2 arrays is not the sum of the sizes computed from this function. In particular, StructArray’s size is an upper bound.

When an array is sliced, its allocated size remains constant because the buffer is unchanged. However, this function will yield a smaller number. This is because this function returns the visible size of the buffer, not its total capacity.

FFI buffers are included in this estimation.

source

pub fn _apply_columns(&self, func: &dyn Fn(&Series) -> Series) -> Vec<Series>

source

pub fn _apply_columns_par( &self, func: &(dyn Fn(&Series) -> Series + Sync + Send) ) -> Vec<Series>

source

pub fn new<S>(columns: Vec<S>) -> Result<DataFrame, PolarsError>
where S: IntoSeries,

Create a DataFrame from a Vector of Series.

§Example
let s0 = Series::new("days", [0, 1, 2].as_ref());
let s1 = Series::new("temp", [22.1, 19.9, 7.].as_ref());

let df = DataFrame::new(vec![s0, s1])?;
source

pub const fn empty() -> DataFrame

Creates an empty DataFrame usable in a compile time context (such as static initializers).

§Example
use polars_core::prelude::DataFrame;
static EMPTY: DataFrame = DataFrame::empty();
source

pub fn pop(&mut self) -> Option<Series>

Removes the last Series from the DataFrame and returns it, or None if it is empty.

§Example
let s1 = Series::new("Ocean", &["Atlantic", "Indian"]);
let s2 = Series::new("Area (km²)", &[106_460_000, 70_560_000]);
let mut df = DataFrame::new(vec![s1.clone(), s2.clone()])?;

assert_eq!(df.pop(), Some(s2));
assert_eq!(df.pop(), Some(s1));
assert_eq!(df.pop(), None);
assert!(df.is_empty());
source

pub fn with_row_index( &self, name: &str, offset: Option<u32> ) -> Result<DataFrame, PolarsError>

Add a new column at index 0 that counts the rows.

§Example
let df1: DataFrame = df!("Name" => &["James", "Mary", "John", "Patricia"])?;
assert_eq!(df1.shape(), (4, 1));

let df2: DataFrame = df1.with_row_index("Id", None)?;
assert_eq!(df2.shape(), (4, 2));
println!("{}", df2);

Output:

 shape: (4, 2)
 +-----+----------+
 | Id  | Name     |
 | --- | ---      |
 | u32 | str      |
 +=====+==========+
 | 0   | James    |
 +-----+----------+
 | 1   | Mary     |
 +-----+----------+
 | 2   | John     |
 +-----+----------+
 | 3   | Patricia |
 +-----+----------+
source

pub fn with_row_index_mut( &mut self, name: &str, offset: Option<u32> ) -> &mut DataFrame

Add a row index column in place.

source

pub const unsafe fn new_no_checks(columns: Vec<Series>) -> DataFrame

Create a new DataFrame but does not check the length or duplicate occurrence of the Series.

It is advised to use DataFrame::new in favor of this method.

§Safety

It is the caller's responsibility to uphold the contract of all Series having an equal length and a unique name; if not, this may panic down the line.

source

pub unsafe fn new_no_length_checks( columns: Vec<Series> ) -> Result<DataFrame, PolarsError>

Create a new DataFrame but does not check the length of the Series, only check for duplicates.

It is advised to use DataFrame::new in favor of this method.

§Safety

It is the caller's responsibility to uphold the contract of all Series having an equal length; if not, this may panic down the line.

source

pub fn agg_chunks(&self) -> DataFrame

Aggregate all chunks to contiguous memory.

source

pub fn shrink_to_fit(&mut self)

Shrink the capacity of this DataFrame to fit its length.

source

pub fn as_single_chunk(&mut self) -> &mut DataFrame

Aggregate all the chunks in the DataFrame to a single chunk.

source

pub fn as_single_chunk_par(&mut self) -> &mut DataFrame

Aggregate all the chunks in the DataFrame to a single chunk in parallel. This may lead to more peak memory consumption.

source

pub fn should_rechunk(&self) -> bool

Returns true if the chunks of the columns do not align and re-chunking should be done.

source

pub fn align_chunks(&mut self) -> &mut DataFrame

Ensure all the chunks in the DataFrame are aligned.

source

pub fn schema(&self) -> Schema

Get the DataFrame schema.

§Example
let df: DataFrame = df!("Thing" => &["Observable universe", "Human stupidity"],
                        "Diameter (m)" => &[8.8e26, f64::INFINITY])?;

let f1: Field = Field::new("Thing", DataType::String);
let f2: Field = Field::new("Diameter (m)", DataType::Float64);
let sc: Schema = Schema::from_iter(vec![f1, f2]);

assert_eq!(df.schema(), sc);
source

pub fn get_columns(&self) -> &[Series]

Get a reference to the DataFrame columns.

§Example
let df: DataFrame = df!("Name" => &["Adenine", "Cytosine", "Guanine", "Thymine"],
                        "Symbol" => &["A", "C", "G", "T"])?;
let columns: &[Series] = df.get_columns();

assert_eq!(columns[0].name(), "Name");
assert_eq!(columns[1].name(), "Symbol");
source

pub unsafe fn get_columns_mut(&mut self) -> &mut Vec<Series>

Get mutable access to the underlying columns.

§Safety

The caller must ensure the length of all Series remains equal.

source

pub fn iter(&self) -> Iter<'_, Series>

Iterator over the columns as Series.

§Example
let s1: Series = Series::new("Name", &["Pythagoras' theorem", "Shannon entropy"]);
let s2: Series = Series::new("Formula", &["a²+b²=c²", "H=-Σ[P(x)log|P(x)|]"]);
let df: DataFrame = DataFrame::new(vec![s1.clone(), s2.clone()])?;

let mut iterator = df.iter();

assert_eq!(iterator.next(), Some(&s1));
assert_eq!(iterator.next(), Some(&s2));
assert_eq!(iterator.next(), None);
source

pub fn get_column_names(&self) -> Vec<&str>

§Example
let df: DataFrame = df!("Language" => &["Rust", "Python"],
                        "Designer" => &["Graydon Hoare", "Guido van Rossum"])?;

assert_eq!(df.get_column_names(), &["Language", "Designer"]);
source

pub fn get_column_names_owned(&self) -> Vec<SmartString<LazyCompact>>

Get the Vec<String> representing the column names.

source

pub fn set_column_names<S>(&mut self, names: &[S]) -> Result<(), PolarsError>
where S: AsRef<str>,

Set the column names.

§Example
let mut df: DataFrame = df!("Mathematical set" => &["ℕ", "ℤ", "𝔻", "ℚ", "ℝ", "ℂ"])?;
df.set_column_names(&["Set"])?;

assert_eq!(df.get_column_names(), &["Set"]);
source

pub fn dtypes(&self) -> Vec<DataType>

Get the data types of the columns in the DataFrame.

§Example
let venus_air: DataFrame = df!("Element" => &["Carbon dioxide", "Nitrogen"],
                               "Fraction" => &[0.965, 0.035])?;

assert_eq!(venus_air.dtypes(), &[DataType::String, DataType::Float64]);
source

pub fn n_chunks(&self) -> usize

The number of chunks per column

source

pub fn fields(&self) -> Vec<Field>

Get a reference to the schema fields of the DataFrame.

§Example
let earth: DataFrame = df!("Surface type" => &["Water", "Land"],
                           "Fraction" => &[0.708, 0.292])?;

let f1: Field = Field::new("Surface type", DataType::String);
let f2: Field = Field::new("Fraction", DataType::Float64);

assert_eq!(earth.fields(), &[f1, f2]);
source

pub fn shape(&self) -> (usize, usize)

Get (height, width) of the DataFrame.

§Example
let df0: DataFrame = DataFrame::default();
let df1: DataFrame = df!("1" => &[1, 2, 3, 4, 5])?;
let df2: DataFrame = df!("1" => &[1, 2, 3, 4, 5],
                         "2" => &[1, 2, 3, 4, 5])?;

assert_eq!(df0.shape(), (0, 0));
assert_eq!(df1.shape(), (5, 1));
assert_eq!(df2.shape(), (5, 2));
source

pub fn width(&self) -> usize

Get the width of the DataFrame which is the number of columns.

§Example
let df0: DataFrame = DataFrame::default();
let df1: DataFrame = df!("Series 1" => &[0; 0])?;
let df2: DataFrame = df!("Series 1" => &[0; 0],
                         "Series 2" => &[0; 0])?;

assert_eq!(df0.width(), 0);
assert_eq!(df1.width(), 1);
assert_eq!(df2.width(), 2);
source

pub fn height(&self) -> usize

Get the height of the DataFrame which is the number of rows.

§Example
let df0: DataFrame = DataFrame::default();
let df1: DataFrame = df!("Currency" => &["€", "$"])?;
let df2: DataFrame = df!("Currency" => &["€", "$", "¥", "£", "₿"])?;

assert_eq!(df0.height(), 0);
assert_eq!(df1.height(), 2);
assert_eq!(df2.height(), 5);
source

pub fn is_empty(&self) -> bool

Check if the DataFrame is empty.

§Example
let df1: DataFrame = DataFrame::default();
assert!(df1.is_empty());

let df2: DataFrame = df!("First name" => &["Forever"],
                         "Last name" => &["Alone"])?;
assert!(!df2.is_empty());
source

pub unsafe fn hstack_mut_unchecked( &mut self, columns: &[Series] ) -> &mut DataFrame

Add columns horizontally.

§Safety

The caller must ensure:

  • the length of all Series is equal to the height of this DataFrame
  • the columns names are unique
source

pub fn hstack_mut( &mut self, columns: &[Series] ) -> Result<&mut DataFrame, PolarsError>

Add multiple Series to a DataFrame. The added Series are required to have the same length.

§Example
fn stack(df: &mut DataFrame, columns: &[Series]) {
    df.hstack_mut(columns);
}
source

pub fn hstack(&self, columns: &[Series]) -> Result<DataFrame, PolarsError>

Add multiple Series to a DataFrame. The added Series are required to have the same length.

§Example
let df1: DataFrame = df!("Element" => &["Copper", "Silver", "Gold"])?;
let s1: Series = Series::new("Proton", &[29, 47, 79]);
let s2: Series = Series::new("Electron", &[29, 47, 79]);

let df2: DataFrame = df1.hstack(&[s1, s2])?;
assert_eq!(df2.shape(), (3, 3));
println!("{}", df2);

Output:

shape: (3, 3)
+---------+--------+----------+
| Element | Proton | Electron |
| ---     | ---    | ---      |
| str     | i32    | i32      |
+=========+========+==========+
| Copper  | 29     | 29       |
+---------+--------+----------+
| Silver  | 47     | 47       |
+---------+--------+----------+
| Gold    | 79     | 79       |
+---------+--------+----------+
source

pub fn vstack(&self, other: &DataFrame) -> Result<DataFrame, PolarsError>

Concatenate a DataFrame to this DataFrame and return as newly allocated DataFrame.

If many vstack operations are done, it is recommended to call DataFrame::align_chunks.

§Example
let df1: DataFrame = df!("Element" => &["Copper", "Silver", "Gold"],
                         "Melting Point (K)" => &[1357.77, 1234.93, 1337.33])?;
let df2: DataFrame = df!("Element" => &["Platinum", "Palladium"],
                         "Melting Point (K)" => &[2041.4, 1828.05])?;

let df3: DataFrame = df1.vstack(&df2)?;

assert_eq!(df3.shape(), (5, 2));
println!("{}", df3);

Output:

shape: (5, 2)
+-----------+-------------------+
| Element   | Melting Point (K) |
| ---       | ---               |
| str       | f64               |
+===========+===================+
| Copper    | 1357.77           |
+-----------+-------------------+
| Silver    | 1234.93           |
+-----------+-------------------+
| Gold      | 1337.33           |
+-----------+-------------------+
| Platinum  | 2041.4            |
+-----------+-------------------+
| Palladium | 1828.05           |
+-----------+-------------------+
source

pub fn vstack_mut( &mut self, other: &DataFrame ) -> Result<&mut DataFrame, PolarsError>

Concatenate a DataFrame to this DataFrame

If many vstack operations are done, it is recommended to call DataFrame::align_chunks.

§Example
let mut df1: DataFrame = df!("Element" => &["Copper", "Silver", "Gold"],
                         "Melting Point (K)" => &[1357.77, 1234.93, 1337.33])?;
let df2: DataFrame = df!("Element" => &["Platinum", "Palladium"],
                         "Melting Point (K)" => &[2041.4, 1828.05])?;

df1.vstack_mut(&df2)?;

assert_eq!(df1.shape(), (5, 2));
println!("{}", df1);

Output:

shape: (5, 2)
+-----------+-------------------+
| Element   | Melting Point (K) |
| ---       | ---               |
| str       | f64               |
+===========+===================+
| Copper    | 1357.77           |
+-----------+-------------------+
| Silver    | 1234.93           |
+-----------+-------------------+
| Gold      | 1337.33           |
+-----------+-------------------+
| Platinum  | 2041.4            |
+-----------+-------------------+
| Palladium | 1828.05           |
+-----------+-------------------+
source

pub fn extend(&mut self, other: &DataFrame) -> Result<(), PolarsError>

Extend the memory backed by this DataFrame with the values from other.

Different from vstack, which adds the chunks from other to the chunks of this DataFrame, extend appends the data from other to the underlying memory locations and thus may cause a reallocation.

If this does not cause a reallocation, the resulting data structure will not have any extra chunks and thus will yield faster queries.

Prefer extend over vstack when you want to do a query after a single append. For instance during online operations where you add n rows and rerun a query.

Prefer vstack over extend when you want to append many times before doing a query. For instance, when you read in multiple files and want to store them in a single DataFrame. In the latter case, finish the sequence of append operations with a rechunk.

source

pub fn drop_in_place(&mut self, name: &str) -> Result<Series, PolarsError>

Remove a column by name and return the column removed.

§Example
let mut df: DataFrame = df!("Animal" => &["Tiger", "Lion", "Great auk"],
                            "IUCN" => &["Endangered", "Vulnerable", "Extinct"])?;

let s1: PolarsResult<Series> = df.drop_in_place("Average weight");
assert!(s1.is_err());

let s2: Series = df.drop_in_place("Animal")?;
assert_eq!(s2, Series::new("Animal", &["Tiger", "Lion", "Great auk"]));
source

pub fn drop_nulls<S>( &self, subset: Option<&[S]> ) -> Result<DataFrame, PolarsError>
where S: AsRef<str>,

Return a new DataFrame where all null values are dropped.

§Example
let df1: DataFrame = df!("Country" => ["Malta", "Liechtenstein", "North Korea"],
                        "Tax revenue (% GDP)" => [Some(32.7), None, None])?;
assert_eq!(df1.shape(), (3, 2));

let df2: DataFrame = df1.drop_nulls::<String>(None)?;
assert_eq!(df2.shape(), (1, 2));
println!("{}", df2);

Output:

shape: (1, 2)
+---------+---------------------+
| Country | Tax revenue (% GDP) |
| ---     | ---                 |
| str     | f64                 |
+=========+=====================+
| Malta   | 32.7                |
+---------+---------------------+
source

pub fn drop(&self, name: &str) -> Result<DataFrame, PolarsError>

Drop a column by name. This is a pure method and will return a new DataFrame instead of modifying the current one in place.

§Example
let df1: DataFrame = df!("Ray type" => &["α", "β", "X", "γ"])?;
let df2: DataFrame = df1.drop("Ray type")?;

assert!(df2.is_empty());
source

pub fn drop_many<S>(&self, names: &[S]) -> DataFrame
where S: AsRef<str>,

Drop columns that are in names.

source

pub fn drop_many_amortized( &self, names: &HashSet<&str, RandomState> ) -> DataFrame

Drop columns that are in names without allocating a HashSet.

source

pub fn insert_column<S>( &mut self, index: usize, column: S ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Insert a new column at a given index.

source

pub fn with_column<S>( &mut self, column: S ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Add a new column to this DataFrame or replace an existing one.

source

pub unsafe fn with_column_unchecked(&mut self, column: Series) -> &mut DataFrame

Adds a column to the DataFrame without doing any checks on length or duplicates.

§Safety

The caller must ensure column.len() == self.height().

source

pub fn _add_columns( &mut self, columns: Vec<Series>, schema: &Schema ) -> Result<(), PolarsError>

source

pub fn with_column_and_schema<S>( &mut self, column: S, schema: &Schema ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Add a new column to this DataFrame or replace an existing one. Uses an existing schema to amortize lookups. If the schema is incorrect, we will fallback to linear search.

source

pub fn get(&self, idx: usize) -> Option<Vec<AnyValue<'_>>>

Get a row in the DataFrame. Beware this is slow.

§Example
fn example(df: &mut DataFrame, idx: usize) -> Option<Vec<AnyValue>> {
    df.get(idx)
}
source

pub fn select_at_idx(&self, idx: usize) -> Option<&Series>

Select a Series by index.

§Example
let df: DataFrame = df!("Star" => &["Sun", "Betelgeuse", "Sirius A", "Sirius B"],
                        "Absolute magnitude" => &[4.83, -5.85, 1.42, 11.18])?;

let s1: Option<&Series> = df.select_at_idx(0);
let s2: Series = Series::new("Star", &["Sun", "Betelgeuse", "Sirius A", "Sirius B"]);

assert_eq!(s1, Some(&s2));
source

pub fn select_by_range<R>(&self, range: R) -> Result<DataFrame, PolarsError>
where R: RangeBounds<usize>,

Select column(s) from this DataFrame by range and return a new DataFrame

§Examples
let df = df! {
    "0" => &[0, 0, 0],
    "1" => &[1, 1, 1],
    "2" => &[2, 2, 2]
}?;

assert!(df.select(&["0", "1"])?.equals(&df.select_by_range(0..=1)?));
assert!(df.equals(&df.select_by_range(..)?));
source

pub fn get_column_index(&self, name: &str) -> Option<usize>

Get column index of a Series by name.

§Example
let df: DataFrame = df!("Name" => &["Player 1", "Player 2", "Player 3"],
                        "Health" => &[100, 200, 500],
                        "Mana" => &[250, 100, 0],
                        "Strength" => &[30, 150, 300])?;

assert_eq!(df.get_column_index("Name"), Some(0));
assert_eq!(df.get_column_index("Health"), Some(1));
assert_eq!(df.get_column_index("Mana"), Some(2));
assert_eq!(df.get_column_index("Strength"), Some(3));
assert_eq!(df.get_column_index("Haste"), None);
source

pub fn try_get_column_index(&self, name: &str) -> Result<usize, PolarsError>

Get column index of a Series by name.

source

pub fn column(&self, name: &str) -> Result<&Series, PolarsError>

Select a single column by name.

§Example
let s1: Series = Series::new("Password", &["123456", "[]B$u$g$s$B#u#n#n#y[]{}"]);
let s2: Series = Series::new("Robustness", &["Weak", "Strong"]);
let df: DataFrame = DataFrame::new(vec![s1.clone(), s2])?;

assert_eq!(df.column("Password")?, &s1);
source

pub fn columns<I, S>(&self, names: I) -> Result<Vec<&Series>, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Select multiple columns by name.

§Example
let df: DataFrame = df!("Latin name" => &["Oncorhynchus kisutch", "Salmo salar"],
                        "Max weight (kg)" => &[16.0, 35.89])?;
let sv: Vec<&Series> = df.columns(&["Latin name", "Max weight (kg)"])?;

assert_eq!(&df[0], sv[0]);
assert_eq!(&df[1], sv[1]);
source

pub fn select<I, S>(&self, selection: I) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Select column(s) from this DataFrame and return a new DataFrame.

§Examples
fn example(df: &DataFrame) -> PolarsResult<DataFrame> {
    df.select(["foo", "bar"])
}
source

pub fn _select_impl( &self, cols: &[SmartString<LazyCompact>] ) -> Result<DataFrame, PolarsError>

source

pub fn _select_impl_unchecked( &self, cols: &[SmartString<LazyCompact>] ) -> Result<DataFrame, PolarsError>

source

pub fn select_with_schema<I, S>( &self, selection: I, schema: &Arc<Schema> ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Select with a known schema.

source

pub fn select_with_schema_unchecked<I, S>( &self, selection: I, schema: &Schema ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Select with a known schema. This doesn’t check for duplicates.

source

pub fn select_physical<I, S>( &self, selection: I ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

source

pub fn select_series( &self, selection: impl IntoVec<SmartString<LazyCompact>> ) -> Result<Vec<Series>, PolarsError>

Select column(s) from this DataFrame and return them into a Vec.

§Example
let df: DataFrame = df!("Name" => &["Methane", "Ethane", "Propane"],
                        "Carbon" => &[1, 2, 3],
                        "Hydrogen" => &[4, 6, 8])?;
let sv: Vec<Series> = df.select_series(&["Carbon", "Hydrogen"])?;

assert_eq!(df["Carbon"], sv[0]);
assert_eq!(df["Hydrogen"], sv[1]);
source

pub fn filter( &self, mask: &ChunkedArray<BooleanType> ) -> Result<DataFrame, PolarsError>

Take the DataFrame rows by a boolean mask.

§Example
fn example(df: &DataFrame) -> PolarsResult<DataFrame> {
    let mask = df.column("sepal.width")?.is_not_null();
    df.filter(&mask)
}
source

pub fn _filter_seq( &self, mask: &ChunkedArray<BooleanType> ) -> Result<DataFrame, PolarsError>

Same as filter but does not parallelize.

source

pub fn take( &self, indices: &ChunkedArray<UInt32Type> ) -> Result<DataFrame, PolarsError>

Take DataFrame rows by index values.

§Example
fn example(df: &DataFrame) -> PolarsResult<DataFrame> {
    let idx = IdxCa::new("idx", &[0, 1, 9]);
    df.take(&idx)
}
source

pub unsafe fn take_unchecked(&self, idx: &ChunkedArray<UInt32Type>) -> DataFrame

§Safety

The indices must be in-bounds.

source

pub fn rename( &mut self, column: &str, name: &str ) -> Result<&mut DataFrame, PolarsError>

Rename a column in the DataFrame.

§Example
fn example(df: &mut DataFrame) -> PolarsResult<&mut DataFrame> {
    let original_name = "foo";
    let new_name = "bar";
    df.rename(original_name, new_name)
}
source

pub fn sort_in_place( &mut self, by_column: impl IntoVec<SmartString<LazyCompact>>, descending: impl IntoVec<bool>, maintain_order: bool ) -> Result<&mut DataFrame, PolarsError>

Sort DataFrame in place by a column.

source

pub fn sort_impl( &self, by_column: Vec<Series>, descending: Vec<bool>, nulls_last: bool, maintain_order: bool, slice: Option<(i64, usize)>, parallel: bool ) -> Result<DataFrame, PolarsError>

This is the dispatch of Self::sort, and exists to reduce compile bloat by monomorphization.

source

pub fn sort( &self, by_column: impl IntoVec<SmartString<LazyCompact>>, descending: impl IntoVec<bool>, maintain_order: bool ) -> Result<DataFrame, PolarsError>

Return a sorted clone of this DataFrame.

§Example
fn sort_example(df: &DataFrame, descending: bool) -> PolarsResult<DataFrame> {
    df.sort(["a"], descending, false)
}

fn sort_by_multiple_columns_example(df: &DataFrame) -> PolarsResult<DataFrame> {
    df.sort(&["a", "b"], vec![false, true], false)
}
source

pub fn sort_with_options( &self, by_column: &str, options: SortOptions ) -> Result<DataFrame, PolarsError>

Sort the DataFrame by a single column with extra options.

source

pub fn replace<S>( &mut self, column: &str, new_col: S ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Replace a column with a Series.

§Example
let mut df: DataFrame = df!("Country" => &["United States", "China"],
                        "Area (km²)" => &[9_833_520, 9_596_961])?;
let s: Series = Series::new("Country", &["USA", "PRC"]);

assert!(df.replace("Nation", s.clone()).is_err());
assert!(df.replace("Country", s).is_ok());
source

pub fn replace_or_add<S>( &mut self, column: &str, new_col: S ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Replace or update a column. The difference between this method and DataFrame::with_column is that now the value of column: &str determines the name of the column and not the name of the Series passed to this method.

source

pub fn replace_column<S>( &mut self, index: usize, new_column: S ) -> Result<&mut DataFrame, PolarsError>
where S: IntoSeries,

Replace column at index idx with a Series.

§Example
# use polars_core::prelude::*;
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("ascii", &[70, 79, 79]);
let mut df = DataFrame::new(vec![s0, s1])?;

// Add 32 to get lowercase ascii values
df.replace_column(1, df.select_at_idx(1).unwrap() + 32);
# Ok::<(), PolarsError>(())
source

pub fn apply<F, S>( &mut self, name: &str, f: F ) -> Result<&mut DataFrame, PolarsError>
where F: FnOnce(&Series) -> S, S: IntoSeries,

Apply a closure to a column. This is the recommended way to do in place modification.

§Example
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("names", &["Jean", "Claude", "van"]);
let mut df = DataFrame::new(vec![s0, s1])?;

fn str_to_len(str_val: &Series) -> Series {
    str_val.str()
        .unwrap()
        .into_iter()
        .map(|opt_name: Option<&str>| {
            opt_name.map(|name: &str| name.len() as u32)
         })
        .collect::<UInt32Chunked>()
        .into_series()
}

// Replace the names column by the length of the names.
df.apply("names", str_to_len);

Results in:

+--------+-------+
| foo    | names |
| ---    | ---   |
| str    | u32   |
+========+=======+
| "ham"  | 4     |
+--------+-------+
| "spam" | 6     |
+--------+-------+
| "egg"  | 3     |
+--------+-------+
source

pub fn apply_at_idx<F, S>( &mut self, idx: usize, f: F ) -> Result<&mut DataFrame, PolarsError>
where F: FnOnce(&Series) -> S, S: IntoSeries,

Apply a closure to a column at index idx. This is the recommended way to do in place modification.

§Example
let s0 = Series::new("foo", &["ham", "spam", "egg"]);
let s1 = Series::new("ascii", &[70, 79, 79]);
let mut df = DataFrame::new(vec![s0, s1])?;

// Add 32 to get lowercase ascii values
df.apply_at_idx(1, |s| s + 32);

Results in:

+--------+-------+
| foo    | ascii |
| ---    | ---   |
| str    | i32   |
+========+=======+
| "ham"  | 102   |
+--------+-------+
| "spam" | 111   |
+--------+-------+
| "egg"  | 111   |
+--------+-------+
source

pub fn try_apply_at_idx<F, S>( &mut self, idx: usize, f: F ) -> Result<&mut DataFrame, PolarsError>
where F: FnOnce(&Series) -> Result<S, PolarsError>, S: IntoSeries,

Apply a closure that may fail to a column at index idx. This is the recommended way to do in place modification.

§Example

This is the idiomatic way to replace some values in a column of a DataFrame given a range of indexes.

let s0 = Series::new("foo", &["ham", "spam", "egg", "bacon", "quack"]);
let s1 = Series::new("values", &[1, 2, 3, 4, 5]);
let mut df = DataFrame::new(vec![s0, s1])?;

let idx = vec![0, 1, 4];

df.try_apply_at_idx(0, |s| {
    s.str()?
    .scatter_with(idx, |opt_val| opt_val.map(|string| format!("{}-is-modified", string)))
});

Results in:

+---------------------+--------+
| foo                 | values |
| ---                 | ---    |
| str                 | i32    |
+=====================+========+
| "ham-is-modified"   | 1      |
+---------------------+--------+
| "spam-is-modified"  | 2      |
+---------------------+--------+
| "egg"               | 3      |
+---------------------+--------+
| "bacon"             | 4      |
+---------------------+--------+
| "quack-is-modified" | 5      |
+---------------------+--------+
source

pub fn try_apply<F, S>( &mut self, column: &str, f: F ) -> Result<&mut DataFrame, PolarsError>
where F: FnOnce(&Series) -> Result<S, PolarsError>, S: IntoSeries,

Apply a closure that may fail to a column. This is the recommended way to do in place modification.

§Example

This is the idiomatic way to replace some values in a column of a DataFrame given a boolean mask.

let s0 = Series::new("foo", &["ham", "spam", "egg", "bacon", "quack"]);
let s1 = Series::new("values", &[1, 2, 3, 4, 5]);
let mut df = DataFrame::new(vec![s0, s1])?;

// create a mask
let values = df.column("values")?;
let mask = values.lt_eq(1)? | values.gt_eq(5_i32)?;

df.try_apply("foo", |s| {
    s.str()?
    .set(&mask, Some("not_within_bounds"))
});

Results in:

+---------------------+--------+
| foo                 | values |
| ---                 | ---    |
| str                 | i32    |
+=====================+========+
| "not_within_bounds" | 1      |
+---------------------+--------+
| "spam"              | 2      |
+---------------------+--------+
| "egg"               | 3      |
+---------------------+--------+
| "bacon"             | 4      |
+---------------------+--------+
| "not_within_bounds" | 5      |
+---------------------+--------+
source

pub fn slice(&self, offset: i64, length: usize) -> DataFrame

Slice the DataFrame along the rows.

§Example
let df: DataFrame = df!("Fruit" => &["Apple", "Grape", "Grape", "Fig", "Fig"],
                        "Color" => &["Green", "Red", "White", "White", "Red"])?;
let sl: DataFrame = df.slice(2, 3);

assert_eq!(sl.shape(), (3, 2));
println!("{}", sl);

Output:

shape: (3, 2)
+-------+-------+
| Fruit | Color |
| ---   | ---   |
| str   | str   |
+=======+=======+
| Grape | White |
+-------+-------+
| Fig   | White |
+-------+-------+
| Fig   | Red   |
+-------+-------+
source

pub fn clear(&self) -> DataFrame

source

pub fn slice_par(&self, offset: i64, length: usize) -> DataFrame

source

pub fn _slice_and_realloc(&self, offset: i64, length: usize) -> DataFrame

source

pub fn head(&self, length: Option<usize>) -> DataFrame

Get the head of the DataFrame.

§Example
let countries: DataFrame =
    df!("Rank by GDP (2021)" => &[1, 2, 3, 4, 5],
        "Continent" => &["North America", "Asia", "Asia", "Europe", "Europe"],
        "Country" => &["United States", "China", "Japan", "Germany", "United Kingdom"],
        "Capital" => &["Washington", "Beijing", "Tokyo", "Berlin", "London"])?;
assert_eq!(countries.shape(), (5, 4));

println!("{}", countries.head(Some(3)));

Output:

shape: (3, 4)
+--------------------+---------------+---------------+------------+
| Rank by GDP (2021) | Continent     | Country       | Capital    |
| ---                | ---           | ---           | ---        |
| i32                | str           | str           | str        |
+====================+===============+===============+============+
| 1                  | North America | United States | Washington |
+--------------------+---------------+---------------+------------+
| 2                  | Asia          | China         | Beijing    |
+--------------------+---------------+---------------+------------+
| 3                  | Asia          | Japan         | Tokyo      |
+--------------------+---------------+---------------+------------+
source

pub fn tail(&self, length: Option<usize>) -> DataFrame

Get the tail of the DataFrame.

§Example
let countries: DataFrame =
    df!("Rank (2021)" => &[105, 106, 107, 108, 109],
        "Apple Price (€/kg)" => &[0.75, 0.70, 0.70, 0.65, 0.52],
        "Country" => &["Kosovo", "Moldova", "North Macedonia", "Syria", "Turkey"])?;
assert_eq!(countries.shape(), (5, 3));

println!("{}", countries.tail(Some(2)));

Output:

shape: (2, 3)
+-------------+--------------------+---------+
| Rank (2021) | Apple Price (€/kg) | Country |
| ---         | ---                | ---     |
| i32         | f64                | str     |
+=============+====================+=========+
| 108         | 0.65               | Syria   |
+-------------+--------------------+---------+
| 109         | 0.52               | Turkey  |
+-------------+--------------------+---------+
source

pub fn iter_chunks(&self, pl_flavor: bool) -> RecordBatchIter<'_>

Iterator over the rows in this DataFrame as Arrow RecordBatches.

§Panics

Panics if the DataFrame that is passed is not rechunked.

This responsibility is left to the caller as we don’t want to take mutable references here, but we also don’t want to rechunk here, as this operation is costly and would benefit the caller as well.

source

pub fn iter_chunks_physical(&self) -> PhysRecordBatchIter<'_>

Iterator over the rows in this DataFrame as Arrow RecordBatches as physical values.

§Panics

Panics if the DataFrame that is passed is not rechunked.

This responsibility is left to the caller as we don’t want to take mutable references here, but we also don’t want to rechunk here, as this operation is costly and would benefit the caller as well.

source

pub fn reverse(&self) -> DataFrame

Get a DataFrame with all the columns in reversed order.

source

pub fn shift(&self, periods: i64) -> DataFrame

Shift the values by a given period and fill the parts that will be empty due to this operation with Nones.

See the method on Series for more info on the shift operation.

source

pub fn fill_null( &self, strategy: FillNullStrategy ) -> Result<DataFrame, PolarsError>

Replace None values with one of the following strategies:

  • Forward fill (replace None with the previous value)
  • Backward fill (replace None with the next value)
  • Mean fill (replace None with the mean of the whole array)
  • Min fill (replace None with the minimum of the whole array)
  • Max fill (replace None with the maximum of the whole array)

See the method on Series for more info on the fill_null operation.

source

pub fn min_horizontal(&self) -> Result<Option<Series>, PolarsError>

Available on crate feature zip_with only.

Aggregate the column horizontally to their min values.

source

pub fn max_horizontal(&self) -> Result<Option<Series>, PolarsError>

Available on crate feature zip_with only.

Aggregate the column horizontally to their max values.

source

pub fn sum_horizontal( &self, null_strategy: NullStrategy ) -> Result<Option<Series>, PolarsError>

Sum all values horizontally across columns.

source

pub fn mean_horizontal( &self, null_strategy: NullStrategy ) -> Result<Option<Series>, PolarsError>

Compute the mean of all values horizontally across columns.

source

pub fn pipe<F, B>(self, f: F) -> Result<B, PolarsError>
where F: Fn(DataFrame) -> Result<B, PolarsError>,

Pipe different functions/ closure operations that work on a DataFrame together.

source

pub fn pipe_mut<F, B>(&mut self, f: F) -> Result<B, PolarsError>
where F: Fn(&mut DataFrame) -> Result<B, PolarsError>,

Pipe different functions/ closure operations that work on a DataFrame together.

source

pub fn pipe_with_args<F, B, Args>( self, f: F, args: Args ) -> Result<B, PolarsError>
where F: Fn(DataFrame, Args) -> Result<B, PolarsError>,

Pipe different functions/ closure operations that work on a DataFrame together.

source

pub fn unique_stable( &self, subset: Option<&[String]>, keep: UniqueKeepStrategy, slice: Option<(i64, usize)> ) -> Result<DataFrame, PolarsError>

Available on crate feature algorithm_group_by only.

Drop duplicate rows from a DataFrame. This fails when there is a column of type List in DataFrame

Stable means that the order is maintained. This has a higher cost than an unstable distinct.

§Example
let df = df! {
              "flt" => [1., 1., 2., 2., 3., 3.],
              "int" => [1, 1, 2, 2, 3, 3, ],
              "str" => ["a", "a", "b", "b", "c", "c"]
          }?;

println!("{}", df.unique_stable(None, UniqueKeepStrategy::First, None)?);

Returns

+-----+-----+-----+
| flt | int | str |
| --- | --- | --- |
| f64 | i32 | str |
+=====+=====+=====+
| 1   | 1   | "a" |
+-----+-----+-----+
| 2   | 2   | "b" |
+-----+-----+-----+
| 3   | 3   | "c" |
+-----+-----+-----+
source

pub fn unique( &self, subset: Option<&[String]>, keep: UniqueKeepStrategy, slice: Option<(i64, usize)> ) -> Result<DataFrame, PolarsError>

Available on crate feature algorithm_group_by only.

Unstable distinct. See DataFrame::unique_stable.

source

pub fn unique_impl( &self, maintain_order: bool, subset: Option<&[String]>, keep: UniqueKeepStrategy, slice: Option<(i64, usize)> ) -> Result<DataFrame, PolarsError>

Available on crate feature algorithm_group_by only.
source

pub fn is_unique(&self) -> Result<ChunkedArray<BooleanType>, PolarsError>

Available on crate feature algorithm_group_by only.

Get a mask of all the unique rows in the DataFrame.

§Example
let df: DataFrame = df!("Company" => &["Apple", "Microsoft"],
                        "ISIN" => &["US0378331005", "US5949181045"])?;
let ca: ChunkedArray<BooleanType> = df.is_unique()?;

assert!(ca.all());
source

pub fn is_duplicated(&self) -> Result<ChunkedArray<BooleanType>, PolarsError>

Available on crate feature algorithm_group_by only.

Get a mask of all the duplicated rows in the DataFrame.

§Example
let df: DataFrame = df!("Company" => &["Alphabet", "Alphabet"],
                        "ISIN" => &["US02079K3059", "US02079K1079"])?;
let ca: ChunkedArray<BooleanType> = df.is_duplicated()?;

assert!(!ca.all());
source

pub fn null_count(&self) -> DataFrame

Create a new DataFrame that shows the null counts per column.

source

pub fn get_supertype(&self) -> Option<Result<DataType, PolarsError>>

Get the supertype of the columns in this DataFrame

source

pub fn unnest<I>(&self, cols: I) -> Result<DataFrame, PolarsError>
where I: IntoVec<String>,

Available on crate feature dtype-struct only.

Unnest the given Struct columns. This means that the fields of the Struct type will be inserted as columns.

source§

impl DataFrame

source

pub fn schema_equal(&self, other: &DataFrame) -> Result<(), PolarsError>

Check if the DataFrames’ schemas are equal.

source

pub fn equals(&self, other: &DataFrame) -> bool

Check if DataFrames are equal. Note that None == None evaluates to false

§Example
let df1: DataFrame = df!("Atomic number" => &[1, 51, 300],
                        "Element" => &[Some("Hydrogen"), Some("Antimony"), None])?;
let df2: DataFrame = df!("Atomic number" => &[1, 51, 300],
                        "Element" => &[Some("Hydrogen"), Some("Antimony"), None])?;

assert!(!df1.equals(&df2));
source

pub fn equals_missing(&self, other: &DataFrame) -> bool

Check if all values in DataFrames are equal where None == None evaluates to true.

§Example
let df1: DataFrame = df!("Atomic number" => &[1, 51, 300],
                        "Element" => &[Some("Hydrogen"), Some("Antimony"), None])?;
let df2: DataFrame = df!("Atomic number" => &[1, 51, 300],
                        "Element" => &[Some("Hydrogen"), Some("Antimony"), None])?;

assert!(df1.equals_missing(&df2));
source

pub fn ptr_equal(&self, other: &DataFrame) -> bool

Checks if the Arc ptrs of the Series are equal

§Example
let df1: DataFrame = df!("Atomic number" => &[1, 51, 300],
                        "Element" => &[Some("Hydrogen"), Some("Antimony"), None])?;
let df2: &DataFrame = &df1;

assert!(df1.ptr_equal(df2));

Trait Implementations§

source§

impl AsofJoin for DataFrame

source§

fn join_asof( &self, other: &DataFrame, left_on: &str, right_on: &str, strategy: AsofStrategy, tolerance: Option<AnyValue<'static>>, suffix: Option<String> ) -> Result<DataFrame, PolarsError>

This is similar to a left-join except that we match on nearest key rather than equal keys. The keys must be sorted to perform an asof join
source§

impl AsofJoinBy for DataFrame

source§

fn join_asof_by<I, S>( &self, other: &DataFrame, left_on: &str, right_on: &str, left_by: I, right_by: I, strategy: AsofStrategy, tolerance: Option<AnyValue<'static>> ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

This is similar to a left-join except that we match on nearest key rather than equal keys. The keys must be sorted to perform an asof join. This is a special implementation of an asof join that searches for the nearest keys within a subgroup set by by.
source§

impl Clone for DataFrame

source§

fn clone(&self) -> DataFrame

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
source§

impl CrossJoin for DataFrame

source§

fn cross_join_dfs( &self, other: &DataFrame, slice: Option<(i64, usize)>, parallel: bool ) -> Result<(DataFrame, DataFrame), PolarsError>

source§

fn cross_join( &self, other: &DataFrame, suffix: Option<&str>, slice: Option<(i64, usize)> ) -> Result<DataFrame, PolarsError>

Creates the Cartesian product from both frames, preserves the order of the left keys.
source§

impl DataFrameJoinOps for DataFrame

source§

fn join<I, S>( &self, other: &DataFrame, left_on: I, right_on: I, args: JoinArgs ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Generic join method. Can be used to join on multiple columns. Read more
source§

fn inner_join<I, S>( &self, other: &DataFrame, left_on: I, right_on: I ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Perform an inner join on two DataFrames. Read more
source§

fn left_join<I, S>( &self, other: &DataFrame, left_on: I, right_on: I ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Perform a left join on two DataFrames Read more
source§

fn outer_join<I, S>( &self, other: &DataFrame, left_on: I, right_on: I ) -> Result<DataFrame, PolarsError>
where I: IntoIterator<Item = S>, S: AsRef<str>,

Perform an outer join on two DataFrames Read more
source§

impl Debug for DataFrame

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
source§

impl Default for DataFrame

source§

fn default() -> DataFrame

Returns the “default value” for a type. Read more
source§

impl DfTake for DataFrame

source§

unsafe fn _take_chunked_unchecked_seq( &self, idx: &[ChunkId], sorted: IsSorted ) -> DataFrame

Take elements by a slice of ChunkIds. Read more
source§

unsafe fn _take_opt_chunked_unchecked_seq( &self, idx: &[Option<ChunkId>] ) -> DataFrame

Take elements by a slice of optional ChunkIds. Read more
source§

unsafe fn _take_chunked_unchecked( &self, idx: &[ChunkId], sorted: IsSorted ) -> DataFrame

Safety Read more
source§

unsafe fn _take_opt_chunked_unchecked( &self, idx: &[Option<ChunkId>] ) -> DataFrame

Safety Read more
source§

impl Display for DataFrame

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
source§

impl From<&ArrowSchema> for DataFrame

source§

fn from(schema: &ArrowSchema) -> DataFrame

Converts to this type from the input type.
source§

impl From<&Schema> for DataFrame

source§

fn from(schema: &Schema) -> DataFrame

Converts to this type from the input type.
source§

impl From<DataFrame> for Vec<Series>

source§

fn from(df: DataFrame) -> Vec<Series>

Converts to this type from the input type.
source§

impl From<StructChunked> for DataFrame

source§

fn from(ca: StructChunked) -> DataFrame

Converts to this type from the input type.
source§

impl FromIterator<Series> for DataFrame

source§

fn from_iter<T>(iter: T) -> DataFrame
where T: IntoIterator<Item = Series>,

§Panics

Panics if Series have different lengths.

source§

impl Index<&str> for DataFrame

§

type Output = Series

The returned type after indexing.
source§

fn index(&self, index: &str) -> &<DataFrame as Index<&str>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<Range<usize>> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index( &self, index: Range<usize> ) -> &<DataFrame as Index<Range<usize>>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeFrom<usize>> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index( &self, index: RangeFrom<usize> ) -> &<DataFrame as Index<RangeFrom<usize>>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeFull> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index(&self, index: RangeFull) -> &<DataFrame as Index<RangeFull>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeInclusive<usize>> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index( &self, index: RangeInclusive<usize> ) -> &<DataFrame as Index<RangeInclusive<usize>>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeTo<usize>> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index( &self, index: RangeTo<usize> ) -> &<DataFrame as Index<RangeTo<usize>>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<RangeToInclusive<usize>> for DataFrame

§

type Output = [Series]

The returned type after indexing.
source§

fn index( &self, index: RangeToInclusive<usize> ) -> &<DataFrame as Index<RangeToInclusive<usize>>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl Index<usize> for DataFrame

§

type Output = Series

The returned type after indexing.
source§

fn index(&self, index: usize) -> &<DataFrame as Index<usize>>::Output

Performs the indexing (container[index]) operation. Read more
source§

impl IntoDf for DataFrame

source§

fn to_df(&self) -> &DataFrame

source§

impl IntoLazy for DataFrame

source§

fn lazy(self) -> LazyFrame

Convert the DataFrame into a LazyFrame

source§

impl JoinDispatch for DataFrame

source§

unsafe fn create_left_df_chunked( &self, chunk_ids: &[ChunkId], left_join: bool ) -> DataFrame

Available on crate feature chunked_ids only.
Safety Read more
source§

unsafe fn _create_left_df_from_slice( &self, join_tuples: &[u32], left_join: bool, sorted_tuple_idx: bool ) -> DataFrame

Safety Read more
source§

fn _finish_left_join( &self, ids: (Either<Vec<u32>, Vec<ChunkId>>, Either<Vec<Option<u32>>, Vec<Option<ChunkId>>>), other: &DataFrame, args: JoinArgs ) -> Result<DataFrame, PolarsError>

Available on crate feature chunked_ids only.
source§

fn _left_join_from_series( &self, other: &DataFrame, s_left: &Series, s_right: &Series, args: JoinArgs, verbose: bool ) -> Result<DataFrame, PolarsError>

source§

fn _outer_join_from_series( &self, other: &DataFrame, s_left: &Series, s_right: &Series, args: JoinArgs ) -> Result<DataFrame, PolarsError>

source§

impl PartialEq for DataFrame

source§

fn eq(&self, other: &DataFrame) -> bool

This method tests for self and other values to be equal, and is used by ==.
1.0.0 · source§

fn ne(&self, other: &Rhs) -> bool

This method tests for !=. The default implementation is almost always sufficient, and should not be overridden without very good reason.
source§

impl PolarsTemporalGroupby for DataFrame

source§

impl PolarsUpsample for DataFrame

source§

fn upsample<I>( &self, by: I, time_column: &str, every: Duration, offset: Duration ) -> Result<DataFrame, PolarsError>
where I: IntoVec<String>,

Upsample a DataFrame at a regular frequency. Read more
source§

fn upsample_stable<I>( &self, by: I, time_column: &str, every: Duration, offset: Duration ) -> Result<DataFrame, PolarsError>
where I: IntoVec<String>,

Upsample a DataFrame at a regular frequency. Read more
source§

impl TryFrom<(Chunk<Box<dyn Array>>, &[Field])> for DataFrame

§

type Error = PolarsError

The type returned in the event of a conversion error.
source§

fn try_from( arg: (Chunk<Box<dyn Array>>, &[Field]) ) -> Result<DataFrame, PolarsError>

Performs the conversion.
source§

impl TryFrom<StructArray> for DataFrame

§

type Error = PolarsError

The type returned in the event of a conversion error.
source§

fn try_from(arr: StructArray) -> Result<DataFrame, PolarsError>

Performs the conversion.

Auto Trait Implementations§

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> DataFrameOps for T
where T: IntoDf,

source§

fn to_dummies( &self, separator: Option<&str>, drop_first: bool ) -> Result<DataFrame, PolarsError>

Available on crate feature to_dummies only.
Create dummy variables (one-hot encoded columns). Read more
source§

fn columns_to_dummies( &self, columns: Vec<&str>, separator: Option<&str>, drop_first: bool ) -> Result<DataFrame, PolarsError>

Available on crate feature to_dummies only.
source§

fn _to_dummies( &self, columns: Option<Vec<&str>>, separator: Option<&str>, drop_first: bool ) -> Result<DataFrame, PolarsError>

Available on crate feature to_dummies only.
source§

impl<T> DynClone for T
where T: Clone,

source§

fn __clone_box(&self, _: Private) -> *mut ()

source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

§

impl<T> Pointable for T

§

const ALIGN: usize = _

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
source§

impl<T> ToOwned for T
where T: Clone,

§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T> ToString for T
where T: Display + ?Sized,

source§

default fn to_string(&self) -> String

Converts the given value to a String. Read more
source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V