# ----------------------------------------------------------------------
# Drop duplicates on the first column of a CSV loaded with pandas.
import pandas as pd

# Read the CSV, order rows by first name, then discard every row whose
# "First Name" value occurs more than once (keep=False removes all copies
# of a duplicated value, not just the extras).
data = (
    pd.read_csv("employees.csv")
    .sort_values("First Name")
    .drop_duplicates(subset="First Name", keep=False)
)
# Show the de-duplicated result.
print(data)
# Remove fully-duplicated rows (assumes `df` is defined elsewhere).
df = df.drop_duplicates()
# Remove duplicate columns: keep only the first occurrence of each column name.
df = df.loc[:,~df.columns.duplicated()]
# Drop rows that repeat the same (Column1, Column2) pair, keeping the first one.
df = df.drop_duplicates(subset=['Column1', 'Column2'], keep='first')
# Example: drop every row whose (A, C) pair is duplicated.
import pandas as pd

df = pd.DataFrame({"A": ["foo", "foo", "foo", "bar"], "B": [0, 1, 1, 1], "C": ["A", "A", "B", "A"]})
# keep=False removes every member of a duplicated (A, C) group.
# Fix: the original computed this result and discarded it (no assignment,
# no inplace=True, no print) — capture it and show it.
deduped = df.drop_duplicates(subset=['A', 'C'], keep=False)
print(deduped)
# ----------------------------------------------------------------------
import pandas as pd
# Drop rows that are duplicated across every column (assumes `df` exists).
df = df.drop_duplicates()
# Drop rows that repeat a value in one specific column.
df = df.drop_duplicates(subset = "column")
# Drop rows that repeat the same pair of values across two columns.
df = df.drop_duplicates(subset = ["column", "column2"])
# Show the de-duplicated DataFrame.
print(df)
# ----------------------------------------------------------------------
# Quick examples of DataFrame.drop_duplicates (assumes `df` is defined elsewhere).
# Keep the first copy of each duplicated row (the default behavior).
df2 = df.drop_duplicates()
# Same as above, but with keep='first' spelled out explicitly.
df2 = df.drop_duplicates(keep='first')
# Keep the last copy of each duplicated row instead.
df2 = df.drop_duplicates( keep='last')
# Remove every copy of any duplicated row (nothing from a duplicate group survives).
df2 = df.drop_duplicates(keep=False)
# Treat rows as duplicates when they match on specific columns only.
df2 = df.drop_duplicates(subset=["Courses", "Fee"], keep=False)
# Mutate `df` directly instead of returning a new DataFrame.
df.drop_duplicates(inplace=True)
# Case-insensitive de-duplication: lower-case every value first, then drop.
df2 = df.apply(lambda x: x.astype(str).str.lower()).drop_duplicates(subset=['Courses', 'Fee'], keep='first')
# ----------------------------------------------------------------------
# Keep the first row of each (Column1, Column2) duplicate group
# (assumes `df` is defined earlier).
result_df = df.drop_duplicates(subset=['Column1', 'Column2'], keep='first')
print(result_df)
# drop duplicates with pandas (original Spanish note: "borrar duplicados pandas")
# ----------------------------------------------------------------------
# Quick examples of DataFrame.drop_duplicates (assumes `df` is defined elsewhere).
# Keep the first copy of each duplicated row (the default behavior).
df2 = df.drop_duplicates()
# Same as above, but with keep='first' spelled out explicitly.
df2 = df.drop_duplicates(keep='first')
# Keep the last copy of each duplicated row instead.
df2 = df.drop_duplicates( keep='last')
# Remove every copy of any duplicated row (nothing from a duplicate group survives).
df2 = df.drop_duplicates(keep=False)
# Treat rows as duplicates when they match on specific columns only.
df2 = df.drop_duplicates(subset=["Courses", "Fee"], keep=False)
# Mutate `df` directly instead of returning a new DataFrame.
df.drop_duplicates(inplace=True)
# Case-insensitive de-duplication: lower-case every value first, then drop.
df2 = df.apply(lambda x: x.astype(str).str.lower()).drop_duplicates(subset=['Courses', 'Fee'], keep='first')
# ----------------------------------------------------------------------
# Full drop_duplicates call with its default arguments spelled out:
#   subset=None   -> consider all columns when comparing rows
#   keep='first'  -> keep the first occurrence of each duplicate
#   inplace=False -> return a new DataFrame rather than mutating in place
dataFrame.drop_duplicates(subset=None, keep='first', inplace=False)
# ----------------------------------------------------------------------
# Remove duplicate columns by name, keeping the first occurrence of each;
# .copy() makes the result an independent DataFrame (presumably to avoid
# chained-assignment warnings on later writes — see the linked thread).
df = df.loc[:,~df.columns.duplicated()].copy()
# https://stackoverflow.com/questions/14984119/python-pandas-remove-duplicate-columns