import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils
import statistics
from itertools import islice

lambdac = 6.5        # cut-off wavelength, in mm
int_times_lc = 5     # number of lambdac's within an evaluation length

#%%
##############################################################################
# TASK I - Write a Python program that computes the amplitude parameters
# (Ra, Rp, Rv, etc.).
##############################################################################

#%%
##############################################################################
# 1.- Use the first of the series of (5) measurements taken along the same
# line on the calibrated wood sample.
##############################################################################

folder = 'C:/Users/danel/Desktop/TFM NUEVO/PYTHON/'
filename1 = 'tabla2020-06-17 21.16.13.608.csv'

# Skip the two header rows of the CSV; the remaining rows are the samples.
rawdata1 = pd.read_csv(folder + filename1, skiprows=range(2))
rawdata1.columns = ['Timestamp', 'distance(mm)']

# Re-read only the first line of the file to recover the acquisition metadata.
with open(folder + filename1) as myfile:
    header_string = list(islice(myfile, 1))
print(header_string)

# NOTE(review): the sampling rate is extracted from fixed character positions
# [68:72] of the header line (value assumed to be in kHz, hence *1e3) — this is
# fragile; confirm it matches the CSV header layout of every input file.
rate = float(header_string[0][68:72])*1e3

# Replace the raw timestamps with elapsed time in seconds (sample index / rate).
rawdata1['Timestamp'] = np.arange(0,rawdata1.shape[0],1)/rate
# Now x is in dimensions of mm.
# v = 5.3 mm/s is the speed of the translation stage, so time * v gives the
# scan position x in mm:
x = rawdata1['Timestamp'].to_numpy()*5.3
dx = x[1] - x[0]                       # spatial sampling step (mm)
z = rawdata1['distance(mm)'].to_numpy()

# Number of profile points spanning one cut-off wavelength; windows overlap by
# q points at each edge so the Gaussian filter's edge region can be discarded.
q = int(lambdac / dx)

N_lc = utils.N_for_lc(lambdac, dx)                               # for Rp, Rv, Rt
N_ln = utils.N_for_ln(lambdac, dx, int_times_lc=int_times_lc)    # for Ra, Rq


def _split_overlapping(arr, n_points, q_edge):
    """Split *arr* into overlapping windows of ``n_points + 1`` samples.

    Consecutive window starts advance by ``n_points - 2*q_edge``, so adjacent
    windows overlap by ``2*q_edge`` samples (the filter edge margin).
    Returns a list of array views.
    """
    step = n_points - 2 * q_edge
    # Number of additional full windows that fit after the first one.
    n_extra = (len(arr) - n_points - 1) // step
    return [arr[i * step: n_points + i * step + 1] for i in range(n_extra + 1)]


# Subdivide x and z into the corresponding subintervals defined above.
N_subsets_lc = (len(z) - N_lc - 1)//(N_lc - 2*q)
x_partitioned = _split_overlapping(x, N_lc, q)
z_partitioned = _split_overlapping(z, N_lc, q)

N_subsets_ln = (len(z) - N_ln - 1)//(N_ln - 2*q)
x_subsets_ln = _split_overlapping(x, N_ln, q)
z_subsets_ln = _split_overlapping(z, N_ln, q)

#%%
# Plot the ENTIRE measured profile (distance, height) — optional cell:
#plt.figure(dpi = 600)
#plt.plot(x,z)
#plt.xlabel('Distancia (mm)')
#plt.ylabel('Perfil Primario (mm)')
#plt.show()

# Plot each evaluation-length subset on a single figure.
plt.figure(dpi = 600)
for i in range(N_subsets_ln):
    plt.plot(x_subsets_ln[i], z_subsets_ln[i])
plt.xlabel('Distancia (mm)')
plt.ylabel('Perfil Dividido (mm)')
##############################################################################
# 2.- Apply a Gaussian filter to remove the waviness.
##############################################################################

# Gaussian low-pass filter sampled on the interval -lambdac <= x <= lambdac:
gauss_filter = utils.gauss_low_pass_filter(lambdac, dx=dx)

# Ra / Rq: one value per evaluation-length subset. The filter separates each
# subset into a low-frequency (waviness) part and a roughness residual; the
# amplitude parameters are computed on the roughness profile only.
Ra = np.empty(len(z_subsets_ln), dtype=float)
Rq = np.empty(len(z_subsets_ln), dtype=float)
for i, (x_sub, z_sub) in enumerate(zip(x_subsets_ln, z_subsets_ln)):
    _, _, z_roughness = utils.ampl_trans_profile(gauss_filter, x_sub, z_sub)
    Ra[i], Rq[i] = utils.amplitude_params_Ra_Rq(z_roughness)

# Rp / Rv / Rt: one value per sampling-length subset.
Rp = np.empty(len(z_partitioned), dtype=float)
Rv = np.empty(len(z_partitioned), dtype=float)
Rt = np.empty(len(z_partitioned), dtype=float)
for i, (x_sub, z_sub) in enumerate(zip(x_partitioned, z_partitioned)):
    _, _, z_roughness = utils.ampl_trans_profile(gauss_filter, x_sub, z_sub)
    Rp[i], Rv[i], Rt[i] = utils.amplitude_params_Rp_Rv_Rt(z_roughness)

#%%
# Summary statistics. numpy is used instead of the pure-Python `statistics`
# module (the original noted both give the same result); ddof=1 makes np.std
# the sample standard deviation, matching statistics.stdev.
Ra_mean = float(np.mean(Ra))
Rq_mean = float(np.mean(Rq))

Ra_stdstats = float(np.std(Ra, ddof=1))
Rq_stdstats = float(np.std(Rq, ddof=1))

#%%
Rp_mean = float(np.mean(Rp))
Rv_mean = float(np.mean(Rv))
Rt_mean = float(np.mean(Rt))

Rp_stdstats = float(np.std(Rp, ddof=1))
Rv_stdstats = float(np.std(Rv, ddof=1))
Rt_stdstats = float(np.std(Rt, ddof=1))