Skip to content

Commit

Permalink
Test permissions
Browse files Browse the repository at this point in the history
Add Gaby's code to echem block

Temp. add PSTrace block

Temp. comit

Temp. comit

Temp commit

Temp commit

Added a way of ploting .csv from PStrace with echem block
  • Loading branch information
BenjaminCharmes committed Jul 29, 2024
1 parent 97312b2 commit 85481e8
Show file tree
Hide file tree
Showing 5 changed files with 255 additions and 198 deletions.
203 changes: 16 additions & 187 deletions pydatalab/pydatalab/apps/PSTrace/PalmSensReader.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,68 +6,15 @@
"""



import pandas as pd

filename = (
"PalmSense_test_datalab.csv" # file with experimetnal data as exported "as csv" from PSTrace
# file with experimetnal data as exported "as csv" from PSTrace
"PalmSense_test_datalab.csv"
)

<<<<<<< HEAD
# The following lines adjust the granularity of reporting.
#pd.options.display.max_rows = 10
#pd.options.display.max_columns = 10
#pd.options.display.float_format = "{:.1f}".format
<<<<<<< HEAD

<<<<<<< HEAD

<<<<<<< HEAD
<<<<<<< HEAD
filename = "PalmSense_test_datalab_shorter.csv"
file_encoding = 'utf-16 LE'
=======


filename = "PalmSense_test_datalab_shorter.csv"
file_encoding = 'utf-16 LE'

def getdata():
"""Loads all the experimental data to a dataframe per excel sheet"""
# set file_encoding to the file encoding (utf8, latin1, etc.)
#df = pd.read_csv(file, header = 2, encoding = file_encoding, index_col = False, names=range(25))

with open(filename, 'r', encoding = file_encoding) as temp_f:
# get No of columns in each line
col_count = [ len(l.split(",")) for l in temp_f.readlines() ]
>>>>>>> cc22c82 (Palmsense reader code now can load the data and separate in different df according to keyword Impedance)


def getdata(filename, file_encoding='utf-16 LE', verbose = False):
=======
filename = "PalmSense_test_datalab_shorter.csv" # file with experimetnal data as exported "as csv" from PSTrace
=======
filename = "PalmSense_test_datalab.csv" # file with experimetnal data as exported "as csv" from PSTrace
<<<<<<< HEAD
>>>>>>> 4fa42bc (code reads all parts of csv output file from PSTrace)
=======

<<<<<<< HEAD
filename = "PalmSense_test_datalab_EIS.csv" # file with experimetnal data as exported "as csv" from PSTrace
>>>>>>> 3378ab0 (solved issues with cases where there are no DC current experiments)
=======
filename = "PalmSense_test_datalab.csv" # file with experimetnal data as exported "as csv" from PSTrace
>>>>>>> 611b9a2 (solved bug)
keyword = "Measurement" #keyword to split input file on
=======
>>>>>>> 3ca1a83 (cleaned up a bit)


def getdata(filename, file_encoding='utf-16 LE', verbose = False ):
>>>>>>> 2d1266c (Formated EIS part of the output files)
=======

def getdata(filename, file_encoding="utf-16 LE", verbose=False):
>>>>>>> 2542e1a ([pre-commit.ci] auto fixes from pre-commit.com hooks)
"""
Loads experimental data from a CSV file, splits the DataFrame based on a specified keyword,
and returns a dictionary containing the resulting DataFrames.
Expand All @@ -89,131 +36,38 @@ def getdata(filename, file_encoding="utf-16 LE", verbose=False):
# Generate column names (names will be 0, 1, 2, ..., maximum columns - 1)
column_names = [i for i in range(0, max(col_count))]

<<<<<<< HEAD
# Read CSV file into a DataFrame
df = pd.read_csv(filename, header=None, names=column_names, encoding=file_encoding)
df = pd.read_csv(filename, header=None,
names=column_names, encoding=file_encoding)

# Find the locations of the keyword "Measurement" in any column. The file onlyhas that when an EIS mesurmment starts
mask = df.apply(lambda row: row.astype(str).str.contains("Measurement"), axis=1)
mask = df.apply(lambda row: row.astype(
str).str.contains("Measurement"), axis=1)
mask["Any"] = mask.any(axis=1)
groups = mask["Any"].cumsum()

# Split the DataFrame based on the keyword occurrences and drop columns with all NaN values
split_dfs = {group: df[group == groups].dropna(axis=1, how="all") for group in groups.unique()}
split_dfs = {group: df[group == groups].dropna(
axis=1, how="all") for group in groups.unique()}

# Display the split DataFrames if verbose=True, default value is False
if verbose:
for key, split_df in split_dfs.items():
print(f"DataFrame for splitting keyword = Measurement occurrence {key}:")
print(
f"DataFrame for splitting keyword = Measurement occurrence {key}:")
print(split_df)
print("\n")

return split_dfs

<<<<<<< HEAD
<<<<<<< HEAD

<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD




=======
### Read csv
df = pd.read_csv(filename, header=None, names=column_names,encoding = file_encoding)

# print(dfhead)
# print (df.head(n=10))

df.to_csv("test.csv")


# Keyword to split on
keyword = 'Impedance'

# Find the locations of the keyword in any column
mask = df.apply(lambda row: row.astype(str).str.contains(keyword), axis=1)
mask['Any'] = mask.any(axis=1)
groups = mask["Any"].cumsum()

# Splitting the DataFrame based on the keyword occurrences
split_dfs = {group: df[group == groups] for group in groups.unique()}

# Display the split DataFrames
for key, split_df in split_dfs.items():
print(f"DataFrame for '{keyword}' occurrence {key}:")
print(split_df)
print("\n")






return df
>>>>>>> cc22c82 (Palmsense reader code now can load the data and separate in different df according to keyword Impedance)
=======
def formatdata(split_dfs):
=======
# def formatdata(split_dfs):
>>>>>>> c84820a (Cleaned and documented code for impedance spectroscopy data formating)

# dfs_with_freq= []
# impedance_dfs = {}
# for key, df in split_dfs.items():

# if df.apply(lambda row: row.astype(str).str.contains('freq / Hz')).any().any():
# dfs_with_freq.append(key)
# df = split_dfs[key].reset_index(drop=True)
# name_row = (df[df.apply(lambda row: row.astype(str).str.contains('Measurement'))
# .any(axis=1)].index[0])
# new_name = df.iloc[name_row][1]
# date_row = (df[df.apply(lambda row: row.astype(str).str.contains('Date and time'))
# .any(axis=1)].index[0])
# date_time = df.iloc[date_row][1]
# print(date_time)




# # Find the index of the row containing the string 'freq / Hz'
# index_with_freq = (df[df.apply(lambda row: row.astype(str).str.contains('freq / Hz'))
# .any(axis=1)].index[0])

# # Set the row with 'freq / Hz' as the header
# df.columns = df.iloc[index_with_freq]

# # Remove the row that contains 'freq / Hz' and rows before it (index < index_with_freq)
# df = df.drop(index_with_freq).drop(index=range(0, index_with_freq))

# # Display the DataFrame after setting the header and removing unnecessary rows
# impedance_dfs[new_name]= {"Date and Time" : date_time, "Data": df}

# if dfs_with_freq:
# n = len (dfs_with_freq)
# print(impedance_dfs)
# print(f"There are {n} Impedance measurements")

# else:
# print("No part of this file contains Impedance measurements")


=======
>>>>>>> 4fa42bc (code reads all parts of csv output file from PSTrace)
=======
=======

>>>>>>> 2542e1a ([pre-commit.ci] auto fixes from pre-commit.com hooks)
def find_row(df, keyword):
    """Return the index label of the first row containing a keyword.

    Every cell is compared as a string, so numeric cells also match if
    their string form contains the keyword.

    Args:
        df: DataFrame to search.
        keyword (str): substring searched for in any cell of each row.

    Returns:
        The index label of the first row in which any cell contains
        ``keyword``.

    Raises:
        IndexError: if no row contains the keyword.
    """
    # Element-wise substring test over the whole frame, then collapse to
    # a per-row "any cell matched" mask.
    row_matches = df.apply(lambda col: col.astype(str).str.contains(keyword)).any(axis=1)
    return df[row_matches].index[0]
>>>>>>> 3ca1a83 (cleaned up a bit)


def format_impedance_data(split_dfs):
Expand Down Expand Up @@ -270,21 +124,8 @@ def format_impedance_data(split_dfs):
print(f"There are {n} Impedance measurements")
else:
print("The are no Impedance measurements")
<<<<<<< HEAD

<<<<<<< HEAD
>>>>>>> 2d1266c (Formated EIS part of the output files)

=======
=======

>>>>>>> 2542e1a ([pre-commit.ci] auto fixes from pre-commit.com hooks)
return impedance_dfs
<<<<<<< HEAD


>>>>>>> c84820a (Cleaned and documented code for impedance spectroscopy data formating)
=======


def format_DC_data(split_dfs):
Expand All @@ -305,7 +146,8 @@ def format_DC_data(split_dfs):
for key, df in split_dfs.items():
# Check for the presence of 'Date and time measurement:', only present in dataframes of DC measurements
if (
df.apply(lambda row: row.astype(str).str.contains("Date and time measurement:"))
df.apply(lambda row: row.astype(str).str.contains(
"Date and time measurement:"))
.any()
.any()
):
Expand All @@ -322,7 +164,7 @@ def format_DC_data(split_dfs):
# (each DC measurements only consists on 2 columns that can change in the magnitude measured
# possible magnitudes: time(s), Voltage (V), Currrent (microA)
DC_dfs = {
f"DC measurement {int(i/2)}": df.iloc[:, i : i + 2]
f"DC measurement {int(i/2)}": df.iloc[:, i: i + 2]
for i in range(0, df.shape[1], 2)
}
if DC_data:
Expand All @@ -347,7 +189,8 @@ def format_DC_data(split_dfs):
df.dropna(how="all", inplace=True)

# Store the extracted information and data in the 'DC_dfs' dictionary
DC_dfs[key] = {"Name": new_name, "Date and Time": date_time, "Data": df}
DC_dfs[key] = {"Name": new_name,
"Date and Time": date_time, "Data": df}
new_key = f"DC measurement ({key})"

n = len(dfs_DC_meas)
Expand All @@ -356,20 +199,6 @@ def format_DC_data(split_dfs):
else:
print("There are no direct current measurements")

<<<<<<< HEAD
<<<<<<< HEAD




>>>>>>> 4fa42bc (code reads all parts of csv output file from PSTrace)
=======

>>>>>>> 3ca1a83 (cleaned up a bit)
def main():
""" Main program """
=======
>>>>>>> 2542e1a ([pre-commit.ci] auto fixes from pre-commit.com hooks)

def main():
"""Main program"""
Expand Down
Binary file modified pydatalab/pydatalab/apps/PSTrace/PalmSense_test_datalab.csv
Binary file not shown.
Binary file not shown.
Loading

0 comments on commit 85481e8

Please sign in to comment.