fun.MEF_ECflux011 <- function(CAL=1, YEAR=2011, N=30, FOZ=F, CCvar=1, tlag=300, Rotate=2,
                              old=F, SKIP1=F, SKIP2=F, MAIN=1, Midplot=F) {
## Function to take time series from the MEF sonic (either ATI or RM Young initially)
## and calculate a lot of sonic parameters and fluxes.
# CAL == Calibration: ==0, no calibration - use default offset/span; ==1, autocalibrations (after 5/2010)
# YEAR == 2009 (OK for Nov or December, 2009) or later
# LICOR == T/F, is the Li7000 running
# FOZ == T/F, is the Fast Ozone running
# N == how many minutes per flux averaging period (usually 30 or 15)
# CCvar == 1, use tc for cross correlation; == 2, use w for cross correlation
# tlag = # of samples to use in cross correlation (300 = 30 sec.)
# Rotate = method of coordinate rotation: 1 = run-to-run, 2 = planar fit
### main sonic running at 10 Hz in 2010 with Li7000 (CO2/H2O)
##########################################
### MISSING PARENTHESIS !! 4/5/2011
### Input text file with names of input/output files in it.
### Will need to update "c:/data/Manitou08/Datafiles.txt" to analyze different months.
### will create monthly files (either 1 or 5 min. averages)
if(YEAR == 2009) datafile <- scan("c:/data/Manitou/Manitou09/Datafiles09.txt",what="character")
if(YEAR == 2010) datafile <- scan("c:/data/Manitou/Manitou010/Datafiles010.txt",what="character")
if(YEAR == 2011) datafile <- scan("c:/data/Manitou/Manitou011/Datafiles011.txt",what="character")
######################
### Flux Laptop Files:
######################
## Parse "datafile" into the input and output filenames
infile <- datafile[24]    ## gives list of data filenames to read in
infile2 <- datafile[26]
if(CAL == 1) calfile <- datafile[50]
### Output data
outdata <- datafile[51]   ## where the output will go
month <- datafile[30]
cat("\n Month:",month)
######################
### Backup Laptop Files:
######################
infile.bk <- datafile[27]   ## gives list of data filenames to read in
infile2.bk <- datafile[29]
###################################
### Output data
outdata.bk <- datafile[56]  ## where the output will go
if(CAL == 1) calfile.bk <- datafile[55]
##############################
FinalData <- datafile[57]
#### Note: Flux Laptop files denoted as "AC", Backup Laptop as "BK"
#############################################################################
###########################################################################################################################
########## DO THE MAIN FLUX LAPTOP DATA FIRST !!!!!!!!
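## NOTE: "datafile" is a flat character vector, so the file roles above are purely
## positional in the Datafiles*.txt index (e.g. element 24 = flux-laptop file list,
## 51 = its output, 57 = the combined output).  A minimal sketch of a named lookup
## using the same indices (hypothetical helper, shown for illustration only):
#   paths <- list(infile = datafile[24],     infile2 = datafile[26],
#                 calfile = datafile[50],    outdata = datafile[51],
#                 month = datafile[30],      infile.bk = datafile[27],
#                 infile2.bk = datafile[29], calfile.bk = datafile[55],
#                 outdata.bk = datafile[56], FinalData = datafile[57])
#   infile <- paths$infile    ## etc., keeps the positional mapping in one place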
################################################################## ########################################################################################################################### ## read in the names of data files and set output files if(SKIP1==F) { ## read in the names of data files and set output files datanames <- scan(infile,skip=1,sep=",",list(index=0,filenames="",starttime="",z=0,FF1=0, Fw=0, Brand="",Rate=0,Licor=0)) ## Data needed from MEF_sonic010: input.data <- matrix(scan(infile2,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0, dday=0, U=0,sU=0,Udir=0,sUdir=0,Vws=0,ustar=0,sdw=0,H=0,Tbar=0,z.L=0,t.co2=0,t.h2o=0)),byrow=T,ncol=18) ## Calibration Data #browser() if(CAL == 1) { if(old==T) { Cal.data <- matrix(scan(calfile,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0,dday=0,endday=0,Calflag=0,Coffset=0,Cspan=0,Hoffset=0,Scoef=0,Pz=0,Psp=0,Ps=0)),byrow=T,ncol=15) } else { Cal.data <- matrix(scan(calfile,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0,dday=0,endday=0,Calflag=0,Coffset=0,Cspan=0,Hoffset=0,Scoef=0,Pz=0,Psp=0,Ps=0,Sdday=0,junk="")),byrow=T,ncol=17) } } ####) NOTE - filenames are on 1 hr basis, input.data/cal.data are 30 min. basis!!! ################ ################################################################################################## #### Filenames ########################################### ### Parse out some data....... filenames <- datanames$filenames t.starts <- datanames$starttime t.startA <- strptime(t.starts, "%m/%d/%Y %H:%M") t.starts <- as.POSIXct(t.startA) z <- datanames$z ##3 measurement height. FF1 <- datanames$FF1 ## flag for half of the data (1st/2nd or all of file) ### FF1=0, 1st half, FF1=1, 2nd half, FF1=2-5, doubled files. index <- datanames$index sflag <- datanames$Fw ## other sonic flag??? BRAND <- datanames$Brand Reprate <- datanames$Rate ## sampling frequency Licor <- datanames$Licor ## is the Li7000 running? ###### other input data (only need delay times for co2 and H2o, but other data is good to see time syncing!) ################ Sonic Data ################################################################# ########NOTE - these time series will be longer than datanames. (by ~ x2, but not quite) ##### however, should be the same number of sample periods as we get from the program below. dday1 <- as.ts(input.data[[6]]) ustar <- as.ts(input.data[[12]]) H <- as.ts(input.data[[14]]) Tempbar <- as.ts(input.data[[15]]) + 273.15 ## in K t.co2 <- as.ts(input.data[[17]]) t.h2o <- as.ts(input.data[[18]]) ### need to get rid of NAs in dday1!!! (otherwise crashes later...) p <- is.na(dday1) dday1 <- dday1[p==F] ustar <- ustar[p==F] H <- H[p==F] Tempbar <- Tempbar[p==F] t.co2 <- t.co2[p==F] t.h2o <- t.h2o[p==F] ### one more filter - replace any NAs in t.co2 and t.h2o with median value p <- is.na(t.co2) t.co2 <- ifelse(p==T,median(t.co2,na.rm=T),t.co2) p <- is.na(t.h2o) t.h2o <- ifelse(p==T,median(t.h2o,na.rm=T),t.h2o) # recombine... i.data <- cbind(dday1,ustar,H,Tempbar,t.co2,t.h2o) ############## Cal. DAta ################################################################## if(CAL == 1) { Calflag <- as.ts(Cal.data[[8]]) Coffset <- as.ts(Cal.data[[9]]) Hoffset <- as.ts(Cal.data[[11]]) Scoef <- as.ts(Cal.data[[12]]) Caldday <- as.ts(Cal.data[[6]]) p <- is.na(Caldday) Caldday <- ifelse(p == T,0,Caldday) Cal <-cbind(Caldday,Calflag,Coffset,Hoffset,Scoef) } ##################################### ############ before looping through the files - should sync the sonic and cal. data. 
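## NOTE: the i.data columns assembled above are referenced by position in the flux loop:
##   [,1] dday1  [,2] ustar  [,3] H  [,4] Tempbar (K)  [,5] t.co2 lag  [,6] t.h2o lag
## e.g. H <- i.data[j,3] and tlag.co2 <- i.data[j,5].  The sync below works on decimal
## days: diff.test = (N/1440)/0.8 (about 37.5 min for N=30), and j is stepped through
## dday1 until it agrees with the raw-file start day to within that tolerance.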
### use dday1 and Caldday ############################ i <- 1 ## sonic data and Cal2 (new Cal data) j <- 1 ## Cal data Lsonic <- length(dday1) Cal2 <- matrix(NA, ncol=5,nrow=Lsonic) i <- 1 # input file loop index j <- 1 # row of Input data matrix and output file loop index and err1ct <- 0 Lfile <- length(filenames) ## use the input sonic file to get length of output file. Lout <- length(dday1) ## generate output matrix M1 <- matrix(NA,ncol=22,nrow=Lout) ### give it 100 extra rows for missed samples period(s) - will get rid of at the end. cat("\n Stop and test the initial Input data matrix!") cat("\n Check length of sonic data and cal. data!") browser() while(i <= Lfile) { cat("\n i=",i) cat("\n j=",j) ##### First - find out if Licor is running!!! if(Licor[i] == 1) { ## Get start time t1 <- t.starts[i] ttag <- fun.timetag(t.startA[i]) ## get decimal day: dday <- ttag[2] + (ttag[3]/2400) + (ttag[4]/1440) dday <- signif(dday,d=7) if(j == 1) lastdday <- dday ## get sampling rate if(Reprate[i] == 10) tstep <- 0.1 if(Reprate[i] == 20) tstep <- 0.05 cat("\n Index:",index[i]) # echo to screen cat("\n time: ", as.character(t1)) cat("\n Decimal Day:",dday) cat("\n\n") if(BRAND[i] == "ATI") { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,xx=0,count=0,co2=0,h2o=0,Pli=0,Tli=0,A1=0,A2=0)),byrow=T,ncol=14) ### xx is just a blank column ### parse the data and make into 10 Hz time series.... date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[10]]) h2o <- as.ts(d[[11]]) Zero <- as.ts(d[[14]]) Span <- as.ts(d[[15]]) } if(BRAND[i] == "RMY") { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,A1=0,A2=0,A3=0,A4=0,Ecode=0,B="",count=0,co2=0,h2o=0,Pli=0,Tli=0,A1=0,A2=0)),byrow=T,ncol=19) ### parse the data and make into 10 Hz time series.... date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[10]]) h2o <- as.ts(d[[11]]) Zero <- as.ts(d[[14]]) Span <- as.ts(d[[15]]) } if(BRAND[i] == "CSI") { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,SOS=0,xx=0,count=0,co2=0,h2o=0,Pli=0,Tli=0,A1=0,A2=0)),byrow=T,ncol=15) ### parse the data and make into 10 Hz time series.... date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[10]]) h2o <- as.ts(d[[11]]) Zero <- as.ts(d[[14]]) Span <- as.ts(d[[15]]) } #if(m==1) browser() ###### Get time tags into something workable ############### ############################################################ date <- strptime(date, "%m/%d/%Y %H:%M:%S") date2 <- fun.Ltimetag(date) #### now have numeric dates - matrix of yy,jday,hrx100,min,sec ## get decimal day: dday <- date2[,2] + (date2[,3]/2400) + (date2[,4]/1440) dday <- signif(dday,d=7) date2 <- cbind(date2,dday) ## add decimal day to date. 
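## NOTE: the decimal day used for syncing is built from the numeric time tag, where the
## hour is stored as hour*100 (see fun.Ltimetag below), so
##   dday = jday + (hour*100)/2400 + minute/1440     (seconds are ignored).
## Worked example (illustrative values only), 13:30 on day 187:
#   ttag <- c(2011, 187, 1300, 30, 0)
#   187 + 1300/2400 + 30/1440     # = 187.5625, i.e. 187 + 13.5/24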
## also create an index - sequential numbers to reference Ldata <- length(date2[,1]) ############################################################## ## create an index - sequential numbers to reference index <- seq(from=1,to=Ldata,by=1) ############################################ ######### Put all data in matrix #################################### DATA <- cbind(index,date2,u,v,w,tc,co2,h2o,Zero,Span) k <- 1 Ldata <- length(DATA[,1]) ### how many flux periods/file if(tstep == 0.1) Npts <- (600*N) - 1 # N=# of minutes, 600 pts/min at 10 Hz if(tstep == 0.05) Npts <- (1200*N) - 1 # N=# of minutes, 600 pts/min at 10 Hz Lk <- length(DATA[,1]) while(k < Ldata) { dday.j <- dday1[j] endpt <- k + Npts if(endpt > Lk) endpt <- Lk ### get a flux averaging period's worth of data DATA.i <- DATA[k:endpt,] ###### get start and end time (only DOY for end time!) Lsubset <- length(DATA.i[,1]) ### make sure that we have at least 5 min of data! if(Lsubset > (10*Npts/N)) { ########## test to see if sec > 50 (usually 58 or 59) currsec <- DATA.i[1,6] currmin <- DATA.i[1,5] currhr <- DATA.i[1,4] currday <- DATA.i[1,3] t1 <- DATA.i[1,2:7] if(currsec > 50) { t1 <- fun.Addminute(t1) err1ct <- err1ct + 1 ## add an error } ttag <- t1 ### use the first point endday <- DATA.i[Lsubset,7] ### DOY of end time. Cdday.j <- Caldday[j] cat("\n Check time tag!") cat("\n Sonic Day:",dday.j) cat("\n Data Day:",ttag[6]) cat("\n Cal. Day:",Cdday.j) cat("\n j=",j) cat("\n\n") dday.test <- ttag[6] ### how to keep sync'd dday.diff <- dday.j - dday.test ### how to keep sync'd dday.diff <- dday.j - dday.test diff.test <- (N/1440)/0.8 if(abs(dday.diff) > diff.test) { cat("\n Out of sync - time tags don't match") if(dday.diff > diff.test) { while(dday.diff > diff.test) { j <- j - 1 dday.j <- dday1[j] dday.diff <- dday.j - dday.test } cat("/Time tags off - are they corrected? #1") #browser() } if(dday.diff < (-1*diff.test)) { while(dday.diff < (-1*diff.test)) { j <- j + 1 dday.j <- dday1[j] dday.diff <- dday.j - dday.test } cat("/Time tags off - are they corrected? #2") #browser() } cat("\n Sonic Day:",dday.j) cat("\n Data Day:",ttag[6]) cat("\n j=",j) if(abs(dday.diff) > diff.test) browser() } ### end of if(dday.diff > diff test) ### parse the data back.... u <- ts(DATA.i[,8],start=0,deltat=tstep) v<- ts(DATA.i[,9],start=0,deltat=tstep) w<- ts(DATA.i[,10],start=0,deltat=tstep) tc<- ts(DATA.i[,11],start=0,deltat=tstep) co2 <- ts(DATA.i[,12],start=0,deltat=tstep) h2o<- ts(DATA.i[,13],start=0,deltat=tstep) Zero <- ts(DATA.i[,14],start=0,deltat=tstep) Span <- ts(DATA.i[,15],start=0,deltat=tstep) cat("\n Before despiking") #browser() # in Sept. 2008 - getting spikes that are 0,0,0,0. # special routine to look for those. 
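## NOTE: despiking is done in two passes.  fun.zero.despike() (defined at the end of
## this file) NAs out samples where u, v and w are all exactly zero (dropped-sample
## artefact); the brand-specific pass then applies fun.ati.despike()/fun.despike(),
## an n*sigma filter about the median, and fun.RMY.despike() additionally removes
## records with tc >= 350 or w >= 30 before the sigma filter.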
d <- fun.zero.despike(u,v,w,tc) u <- d$u v <- d$v w <- d$w tc <- d$tc tc <- tc + 273.15 if(BRAND[i] == "ATI") { # test u d <- fun.ati.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.ati.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.ati.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.ati.despike(tc,"tc",n=4) tc <- d$x tc.SPIKE <- d$SPIKE } if(BRAND[i] == "RMY") { d <- fun.RMY.despike(u,v,w,tc) u <- d$u v <- d$v w <- d$w tc <- d$tc spike <- d$z3 # test u d <- fun.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.despike(tc,"tc",n=3) tc <- d$x tc.SPIKE <- d$SPIKE } if(BRAND[i] == "CSI") { # test u d <- fun.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.despike(tc,"tc",n=5) tc <- d$x tc.SPIKE <- d$SPIKE } ######## Do we need to despike CO2, H2O and O3 - leave for later !!!! ### From Nov16-Dec17, 2010. Was dropping samples (giving line of zero's) ### then despike after getting rid of calibration points! d <- fun.LImissample(co2,h2o,Zero,Span) co2 <- d$co2 h2o <- d$h2o Zero <- d$Zero Span <- d$Span cat("\n After despiking") #browser() cat("\n Total u-spikes:", u.SPIKE) cat("\n Total v-spikes:", v.SPIKE) cat("\n Total w-spikes:", w.SPIKE) par(mfrow=c(3,1)) #plot(u,type="l") #plot(w,type="l") plot(tc,type="l") plot(co2,type="l") plot(h2o,type="l") #browser() ubar <- mean(u, na.rm=T) vbar <- mean(v, na.rm=T) wstats <- fun.skewness(w, OUT=F) wbar <- wstats$xbar Tbar <- mean(tc, na.rm=T) Tbar.C <- Tbar + 273.15 ########### Put in planar fit rotation!!!!! if(Rotate==1) { vwRotate <- F # align wind vectors with the mean wind - 10 Hz data ## run-by-run, 2 rotations (Kaimal and Finnigan) d <- fun.coord.rot(u, v, w, ubar, vbar, wbar) u <- d$u v <- d$v w <- d$w theta <- d$theta #theta = u-v rotation phi <- d$phi } if(Rotate==2) { # align wind vectors with the mean wind - 10 Hz data ## run-by-run, 2 rotations (Kaimal and Finnigan) d <- fun.topo.rotate(u, v, w, month) u <- d$u3 v <- d$v3 w <- d$w2 } ## get wbar after rotation (important for planar fit) wbar <- mean(w,na.rm=T) ####### should probably compute u* here from planar fit rotation!!! v.w <- var(v,w, na.rm=T) u.w <- var(u,w, na.rm=T) ustar <- ((u.w)^2 + (v.w)^2) ^ 0.25 ustar <- signif(ustar,d=5) cat("\n ustar:",ustar) ### get proper time lags dday.lag <- i.data[j,1] d.ttag <- ttag[6] - dday.lag ### dont think we need this anymore!!! Syncing earlier. #if(1==0) { # #if(abs(d.ttag) > 0.0002) { # cat("NOT sync'd") ## while(d.ttag > 0.0002) { # m <- m + 1 # dday.lag <- i.data[m,1] # d.ttag <- ttag[6] - dday.lag # } # # while(d.ttag < -0.0002) { # m <- m - 1 # dday.lag <- i.data[m,1] # d.ttag <- ttag[6] - dday.lag # } # # } ## end of if(abs(d.ttag) > 0.0002) # # } ### end of if(1==0) ### Now deal with CO2 and H2O calibration if(CAL == 0) { ### no autocals - use a default calibration coefficient ### before May, 2010, CO2offset = 0, H2O offset = ?? and Scoeff = 0.899 (median) Z.co2 <- 0 Z.h2o <- 0 S.co2 <- 0.899 ## 3/2010, reading ~ 427 ppm, should probably be around 390. 390/427=0.913 ## until we actually calibrate the Li7000 under exp.conditions!!!! #S.co2 <- 1 S.h2o <- 1 } else { #### figure out autocalibrations later ### will replace these. 
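## NOTE: the autocal coefficients come from Cal.data on the same half-hourly index j as
## the sonic summary: Coffset/Hoffset are zero offsets, Scoef is the CO2 span, and they
## are applied below as (raw - zero)*span; the H2O span is held at 1 on the flux laptop.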
#if(i==1) browser() Z.co2 <- Coffset[j] Z.h2o <- Hoffset[j] S.co2 <- Scoef[j] if(month == "test") S.co2 <- 1 Cflag.m <- Calflag[j] S.h2o <- 1 } ####### Put Ozone calibration in here!!!!!!! ## put in calibrations co2 <- (co2 - Z.co2)*S.co2 h2o <- (h2o - Z.h2o)*S.h2o #### if Calibrating - need to replace calibration time with NAs if(CAL == 1) { if(Cflag.m == 1) { ## during zero and span - zero status is high (span can be either) Z.i <- Zero > 2 ## further check for calibrations Zsum <- sum(Z.i,na.rm=T) if(Zsum > 0) { ## Replace calibration data with NAs. co2 <- ifelse(Z.i == 1,NA,co2) h2o <- ifelse(Z.i == 1, NA,h2o) ### would also like to get rid of first 150 points after calibration (15 seconds)... Cindex <- seq(from=1,to=length(co2),by=1) Cindex <- Cindex[Z.i] ## now just the index numbers during calibration Cmax <- max(Cindex,na.rm=T) ## last point test <- length(co2) - Cmax #if(j==792 & month=="aug") browser() if(test >= 150) { Cend <- Cmax + 150 ## get rid of 150 pts after calibration co2[Cmax:Cend] <- NA h2o[Cmax:Cend] <- NA cat("\n Calibration: test time series") } lines(h2o,col=2,lty=2) #browser() } ### end of if(Zsum > 0) } ### end of if(Cflag==1) } ### end of if(Cal==1) ###### now we can despike the CO2 and H2o time series: # test co2 d <- fun.despike(co2,"CO2",n=5) co2 <- d$x # test co2 d <- fun.despike(h2o,"H2O",n=5) h2o <- d$x co2.ppm <- mean(co2,na.rm=T) #h2o.mmol <- mean(h2o,na.rm=T) ########## some constants we need ############################## # Convert Pressure from kPa to kg m^-3 p.bar <- 76 # in kPa (estimated - will get measured value later) rho <- p.bar/(Tbar * 0.287) # R = 0.287 (kPa kg / K m^3) # units are kg m^-3 Vm <- 22.414*(101.3/p.bar)*(Tbar/273.15) cp <- 1004.67 Tbar.C <- Tbar - 273.15 W.avg <- mean(h2o, na.rm=T) e <- (p.bar/1000)*W.avg rhov <- (e*1000)/(Tbar*0.4615) # rhov in g/m^3 rhov <- rhov/1000 # kg/m^3 rhoa <- rho - rhov # kg/m^3 sigma <- rhov/rhoa mu <- 1.6077 Lv <- (2.501 - (0.00237*Tbar.C))*1e6 ### J kg-1 (lat. heat of vapor of H2O) #################################################################### cat("should be sync'd, ready to go through step-by-step!") #if(i==1) browser() ### should be sync'd now. #browser() ################################ ## convert to umole/m^3 co2 <- co2*(1000/Vm) rhoc <- mean(co2,na.rm=T) ## or mmole/m^3 h2o <- h2o*(1000/Vm) ################################ ############ Calculate Fluxes ############################# ## First heat flux - using planar fit coordinate rotation w.tc <- var(w,tc, na.rm=T) H.pf <- rho*cp*w.tc ########## should detrend both H2O and CO2 time series tstep <- 0.1 ###3 Screwed up here!!! ################ h2o <- fun.detrend(h2o,tstep) co2 <- fun.detrend(co2,tstep) ####################################################### ## get in # of samples, negative lag = moves time series later tlag.co2 <- i.data[j,5] tlag.h2o <- i.data[j,6] ####tlag.o3 <- i.data[j,7] wLco2 <- lag(w,k=tlag.co2) wLh2o <- lag(w,k=tlag.h2o) ######h2oL <- lag(h2o,k=tlag.h2o) #####o3L <- lag(o3,k=tlag.o3) ## combine everything: WlagCO2 <- cbind(co2,wLco2) WlagH2O <- cbind(h2o,wLh2o) #### calculate latent heat flux (no H-correction) w.q <- var(WlagH2O[,2],WlagH2O[,1],na.rm=T) ##3 water flux, mmol m-2 s-1 w.q.kg <- w.q*(18/1e6) ### kg m-2 s-1 LE <- Lv*(1 + mu*sigma)*w.q.kg ### W m-2 #### calculate CO2 flux (with LE Webb correction) w.c <- var(WlagCO2[,2],WlagCO2[,1],na.rm=T) ### Raw Co2 covariance EcorrF <- (rhoc/rhoa)*(mu/(1+mu*sigma))*LE/Lv ### LE webb corr. 
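## NOTE: EcorrF above is the latent-heat (water vapour) term of the Webb-Pearman-Leuning
## density correction, mu*(rhoc/rhoa)*(LE/Lv)/(1 + mu*sigma); substituting
## LE = Lv*(1 + mu*sigma)*w.q.kg shows it reduces to mu*(rhoc/rhoa)*w.q.kg.  Only this
## term is added to the raw covariance below; the sensible-heat term (HcorrF, a few
## lines down) is reported for comparison with the EOL processing but is not added
## to Fco2.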
Fco2 <- w.c + EcorrF ### final Webb-corr'd flux ### should compute sensible heat flux Webb correction here - just for info ### (can compare to online EOL data easier!) HcorrF <- (rhoc/rho)*(H.pf/(cp*Tbar)) #umol m-2 s-1 ######## Will do Ozone here ########################## ################ Collect terms and round off the data ######## w.q <- signif(w.q,d=5) w.c <- signif(w.c, d=5) LE <- signif(LE, d=4) EcorrF <- signif(EcorrF, d=4) Fco2 <- signif(Fco2,d=5) W.avg <- signif(W.avg,d=5) co2.ppm <- signif(co2.ppm,d=5) H <- i.data[j,3] wbar <- signif(wbar,d=4) ####3 will need some ozone values here cat("\n H=",H) cat("\n LE=",LE) cat("\n F(co2)=",Fco2) cat("\n [CO2]=",co2.ppm) cat("\n [H2O]=",W.avg) cat("\n HcorrF=",HcorrF) cat("\n\n") #browser() ####################### need to gather up data here################################33 tname <- c(ttag,endday, dday.lag, co2.ppm,W.avg,H,LE,Fco2,w.q,w.c,EcorrF,H.pf,wbar,HcorrF,dday.j,Cdday.j,ustar) #if(i == 1) browser() M1[j,1:22] <- tname #browser() dput(M1,outdata) k <- endpt + 1 j <- j + 1 } ## end of if(Lsubset > (Npts/N)) else { k <- Ldata ### set k to last pt. to get out of while loop. } } ## end of while(k < Ldata) lastdday <- dday i <- i + 1 } ## end of if(Licor[i] == 1) else { i <- i + 1 ## increment the loop and move on cat("\n No CO2 data!!") j <- j + 1 ## increment j dday.j <- dday1[j] } } ## end of while(i < Lfile) statement cat("\n End of while loop!") #browser() #### At end - get rid of extra rows with NAs in them. #M1 <- M1[1:j,] ### put in quick filter for very crazy flux values: Hpf <- M1[,17] LE <- M1[,12] Fco2 <- M1[,13] flag1 <- ifelse(Fco2 > 20 | Fco2 < -30,1,0) flag2 <- ifelse(LE > 600 | LE < -100,1,0) flag3 <- ifelse(Hpf > 1000 | Hpf < -500,1,0) ##3 now substitute some stuff Fco2 <- ifelse(flag1 == 1,NA,Fco2) wc <- ifelse(flag1 == 1,NA,M1[,15]) LE <- ifelse(flag2 == 1,NA,LE) wq <- ifelse(flag2 == 1,NA,M1[,14]) Hpf <- ifelse(flag3 == 1,NA,Hpf) if(Midplot==T) { cat("\n Make some plots - if everthing looks OK - then output") par(mfrow=c(2,1)) plot(M1[,6], M1[,11],type="l",ylab="Sensible Heat") lines(M1[,6],Hpf,col=2,lty=2) cat("\n Black=Kaimal/Finnigan, Red=Planar Fit") plot(M1[,6],Hpf,type="l",ylab="Energy Fluxes") lines(M1[,6],LE,col=2) cat("\n Black=H, Red=LE") browser() ################################################# plot(M1[,6], M1[,9],type="l",ylab="CO2, ppm") plot(M1[,6],M1[,10],type="l",ylab="H2O, mmol mol-1") browser() ################################################ plot(M1[,6], LE,type="l",ylab="LE, W/m^2") plot(M1[,6], Fco2,type="l",ylab="Fco2, umol m-2 s-1") browser() } #### end of if(Midplot==T) ## now put back in substituted data: M1[,13] <- Fco2 M1[,12] <- LE M1[,14] <- wq M1[,15] <- wc M1[,17] <- Hpf ## then output. 
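## NOTE: M1 columns, as written out below and re-read in the SKIP1 branch:
##   1 year  2 jday  3 hour*100  4 min  5 sec  6 start dday  7 end DOY  8 dday.lag
##   9 CO2 (ppm)  10 H2O (mmol/mol)  11 H (sonic summary)  12 LE  13 Fco2  14 w'q'
##  15 w'c'  16 EcorrF  17 H (planar fit)  18 wbar  19 HcorrF  20 sonic dday
##  21 cal dday  22 ustar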
fun.asci(M1,outdata) } ### END OF if(SKIP1=F) else { ### read in the text file that was computed previously ## read in the output file from the flux laptop D <- scan(outdata,sep=" ",list(year=0,jday=0,hh=0,mm=0,ss=0,tstr=0,tend=0,tlag=0,co2=0,W=0,H=0,LE=0,Fco2=0,wq=0,wc=0,EcorrF=0,Hpf=0,wbar=0,HcorrF=0,ddayJ=0,ddayJ2=0,ustar=0)) M1 <- cbind(D$year,D$jday,D$hh,D$mm,D$ss,D$tstr,D$tend,D$tlag,D$co2,D$W,D$H,D$LE,D$Fco2,D$wq,D$wc,D$EcorrF,D$Hpf,D$wbar,D$HcorrF,D$ddayJ,D$ddayJ2,D$ustar) cat("\ Read in flux laptop data") } ######### Need to work on - add ozone to the backup laptop ############################################################################################################ ############################################################################################################ ###### Now start the loop for the Backup Laptop ############################################################ ############################################################################################################ ## read in the names of data files and set output files if(SKIP2==F) { ## read in the names of data files and set output files datanames <- scan(infile.bk,skip=1,sep=",",list(index=0,filenames="",starttime="",z=0,FF1=0, Fw=0, Brand="",Rate=0,Licor=0,Fozone=0)) ## Data needed from MEF_sonic011: input.data <- matrix(scan(infile2.bk,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0, dday=0, U=0,sU=0,Udir=0,sUdir=0,Vws=0,ustar=0,sdw=0,H=0,Tbar=0,z.L=0,t.co2=0,t.h2o=0)),byrow=T,ncol=18) ## Calibration Data #browser() if(CAL == 1) { if(old==T) { Cal.data <- matrix(scan(calfile.bk,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0,dday=0,endday=0,Calflag=0,Coffset=0,Cspan=0,Hoffset=0,Scoef=0,Pz=0,Psp=0,Ps=0)),byrow=T,ncol=15) } else { Cal.data <- matrix(scan(calfile.bk,sep=" ",list(year=0,jday=0,hour=0,minute=0,second=0,dday=0,endday=0,Calflag=0,Coffset=0,Cspan=0,Hoffset=0,Scoef=0,dday2=0,Psp="",Ps="",Sdday="",junk="")),byrow=T,ncol=17) } } ####) NOTE - filenames are on 1 hr basis, input.data/cal.data are 30 min. basis!!! ################ ################################################################################################## #### Filenames ########################################### ### Parse out some data....... filenames <- datanames$filenames t.starts <- datanames$starttime t.startA <- strptime(t.starts, "%m/%d/%Y %H:%M") t.starts <- as.POSIXct(t.startA) z <- datanames$z ##3 measurement height. CO2.std <- datanames$CO2 ## Cal. Std. concentration for CO2 index <- datanames$index Fw <- datanames$Fw ## other sonic flag??? BRAND <- datanames$Brand Reprate <- datanames$Rate ## sampling frequency Licor <- datanames$Licor ## is the Li7000 running? O3flag <- datanames$Fozone ## are we measuring CO2 or O3 with analog voltage? ###### other input data (only need delay times for co2 and H2o, but other data is good to see time syncing!) ################ Sonic Data ################################################################# ########NOTE - these time series will be longer than datanames. (by ~ x2, but not quite) ##### however, should be the same number of sample periods as we get from the program below. dday1 <- as.ts(input.data[[6]]) ustar <- as.ts(input.data[[12]]) H <- as.ts(input.data[[14]]) Tempbar <- as.ts(input.data[[15]]) + 273.15 ## in K t.co2 <- as.ts(input.data[[17]]) t.h2o <- as.ts(input.data[[18]]) ### need to get rid of NAs in dday1!!! (otherwise crashes later...) 
p <- is.na(dday1) dday1 <- dday1[p==F] ustar <- ustar[p==F] H <- H[p==F] Tempbar <- Tempbar[p==F] t.co2 <- t.co2[p==F] t.h2o <- t.h2o[p==F] ### one more filter - replace any NAs in t.co2 and t.h2o with median value p <- is.na(t.co2) t.co2 <- ifelse(p==T,median(t.co2,na.rm=T),t.co2) p <- is.na(t.h2o) t.h2o <- ifelse(p==T,median(t.h2o,na.rm=T),t.h2o) # recombine... i.data <- cbind(dday1,ustar,H,Tempbar,t.co2,t.h2o) ############## Cal. DAta ################################################################## if(CAL == 1) { Calflag <- as.ts(Cal.data[[8]]) Coffset <- as.ts(Cal.data[[9]]) Hoffset <- as.ts(Cal.data[[11]]) Scoef <- as.ts(Cal.data[[12]]) Caldday <- as.ts(Cal.data[[6]]) p <- is.na(Caldday) Caldday <- ifelse(p == T,0,Caldday) Cal <-cbind(Caldday,Calflag,Coffset,Hoffset,Scoef) } ##################################### ############ before looping through the files - should sync the sonic and cal. data. ### use dday1 and Caldday ############################ i <- 1 ## sonic data and Cal2 (new Cal data) j <- 1 ## Cal data Lsonic <- length(dday1) Cal2 <- matrix(NA, ncol=5,nrow=Lsonic) i <- 1 # input file loop index j <- 1 # row of Input data matrix and output file loop index and err1ct <- 0 Lfile <- length(filenames) ## use the input sonic file to get length of output file. Lout <- length(dday1) ## generate output matrix M2 <- matrix(NA,ncol=22,nrow=Lout) cat("\n Stop and test the initial Input data matrix!") cat("\n Check length of sonic data and cal. data!") browser() while(i <= Lfile) { cat("\n i=",i) cat("\n j=",j) ##### First - find out if Licor is running!!! if(Licor[i] == 1) { ## Get start time t1 <- t.starts[i] ttag <- fun.timetag(t.startA[i]) ## get decimal day: dday <- ttag[2] + (ttag[3]/2400) + (ttag[4]/1440) dday <- signif(dday,d=7) if(j == 1) lastdday <- dday ## get sampling rate if(Reprate[i] == 10) tstep <- 0.1 if(Reprate[i] == 20) tstep <- 0.05 cat("\n Index:",index[i]) # echo to screen cat("\n time: ", as.character(t1)) cat("\n Decimal Day:",dday) cat("\n\n") if(BRAND[i] == "ATI") { if(Fw[i]==4) { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,xx=0,Vh2o=0,Vco2=0,A1=0,A2=0)),byrow=T,ncol=11) } else { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,xx=0,Vh2o=0,Vco2=0,A1=0)),byrow=T,ncol=10) } ### xx is just a blank column ### parse the data and make into 10 Hz time series.... date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[9]]) h2o <- as.ts(d[[8]]) } if(BRAND[i] == "RMY") { if(Fw[i]==4) { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,A1=0,A2=0,A3=0,A4=0,Ecode=0,B="",Vh2o=0,Vco2=0,A5=0,A6=0)),byrow=T,ncol=16) } else { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,A1=0,A2=0,A3=0,A4=0,Ecode=0,B="",Vh2o=0,Vco2=0,A5=0)),byrow=T,ncol=15) } ### parse the data and make into 10 Hz time series.... date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[14]]) h2o <- as.ts(d[[13]]) } if(BRAND[i] == "CSI") { if(Fw[i]==4) { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,SOS=0,xx=0,Vh2o=0,Vco2=0,A1=0,A2=0)),byrow=T,ncol=12) } else { d <- matrix(scan(filenames[i],sep="\t",list(date="",msec=0,u=0,v=0,w=0,tc=0,SOS=0,xx=0,Vh2o=0,Vco2=0,A1=0)),byrow=T,ncol=11) } ### parse the data and make into 10 Hz time series.... 
date <- as.ts(d[[1]]) u <- as.ts(d[[3]]) v <- as.ts(d[[4]]) w <- as.ts(d[[5]]) tc <- as.ts(d[[6]]) ## get CO2 and H2O (and O3) for lag time calcs. co2 <- as.ts(d[[10]]) h2o <- as.ts(d[[9]]) Zero <- as.ts(d[[11]]) if(Fw[i] == 4) Span <- as.ts(d[[12]]) else Span <- as.ts(d[[11]]) ## just read in the zero again! } cat("\n TESt #1") #if(i==158 & j == 314) browser() ######### do we need to designate a "span" vector?? maybe not - just use the zero #if(i==1) browser() ###### Get time tags into something workable ############### ############################################################ date <- strptime(date, "%m/%d/%Y %H:%M:%S") date2 <- fun.Ltimetag(date) #### now have numeric dates - matrix of yy,jday,hrx100,min,sec ## get decimal day: dday <- date2[,2] + (date2[,3]/2400) + (date2[,4]/1440) dday <- signif(dday,d=7) date2 <- cbind(date2,dday) ## add decimal day to date. ## also create an index - sequential numbers to reference Ldata <- length(date2[,1]) ############################################################## ## create an index - sequential numbers to reference index <- seq(from=1,to=Ldata,by=1) ############################################ ######### Put all data in matrix #################################### DATA <- cbind(index,date2,u,v,w,tc,co2,h2o,Zero,Span) k <- 1 Ldata <- length(DATA[,1]) ### how many flux periods/file if(tstep == 0.1) Npts <- (600*N) - 1 # N=# of minutes, 600 pts/min at 10 Hz if(tstep == 0.05) Npts <- (1200*N) - 1 # N=# of minutes, 600 pts/min at 10 Hz Lk <- length(DATA[,1]) while(k < Ldata) { dday.j <- dday1[j] endpt <- k + Npts if(endpt > Lk) endpt <- Lk ### get a flux averaging period's worth of data DATA.i <- DATA[k:endpt,] ###### get start and end time (only DOY for end time!) Lsubset <- length(DATA.i[,1]) ### make sure that we have at least 5 min of data! if(Lsubset > (10*Npts/N)) { ########## test to see if sec > 50 (usually 58 or 59) currsec <- DATA.i[1,6] currmin <- DATA.i[1,5] currhr <- DATA.i[1,4] currday <- DATA.i[1,3] t1 <- DATA.i[1,2:7] if(currsec > 50) { t1 <- fun.Addminute(t1) err1ct <- err1ct + 1 ## add an error } ttag <- t1 ### use the first point endday <- DATA.i[Lsubset,7] ### DOY of end time. Cdday.j <- Caldday[j] cat("\n Check time tag!") cat("\n Sonic Day:",dday.j) cat("\n Data Day:",ttag[6]) cat("\n Cal. Day:",Cdday.j) cat("\n j=",j) cat("\n\n") dday.test <- ttag[6] ### how to keep sync'd dday.diff <- dday.j - dday.test ### how to keep sync'd dday.diff <- dday.j - dday.test diff.test <- (N/1440)/0.8 if(abs(dday.diff) > diff.test) { cat("\n Out of sync - time tags don't match") if(dday.diff > diff.test) { while(dday.diff > diff.test) { j <- j - 1 dday.j <- dday1[j] dday.diff <- dday.j - dday.test } cat("/Time tags off - are they corrected? #1") #browser() } if(dday.diff < (-1*diff.test)) { while(dday.diff < (-1*diff.test)) { j <- j + 1 dday.j <- dday1[j] dday.diff <- dday.j - dday.test } cat("/Time tags off - are they corrected? #2") #browser() } cat("\n Sonic Day:",dday.j) cat("\n Data Day:",ttag[6]) cat("\n j=",j) if(abs(dday.diff) > diff.test) browser() } ### end of if(dday.diff > diff test) ### parse the data back.... u <- ts(DATA.i[,8],start=0,deltat=tstep) v<- ts(DATA.i[,9],start=0,deltat=tstep) w<- ts(DATA.i[,10],start=0,deltat=tstep) tc<- ts(DATA.i[,11],start=0,deltat=tstep) co2 <- ts(DATA.i[,12],start=0,deltat=tstep) h2o<- ts(DATA.i[,13],start=0,deltat=tstep) Zero <- ts(DATA.i[,14],start=0,deltat=tstep) Span <- ts(DATA.i[,15],start=0,deltat=tstep) cat("\n Before despiking") #browser() # in Sept. 
2008 - getting spikes that are 0,0,0,0. # special routine to look for those. d <- fun.zero.despike(u,v,w,tc) u <- d$u v <- d$v w <- d$w tc <- d$tc tc <- tc + 273.15 if(BRAND[i] == "ATI") { # test u d <- fun.ati.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.ati.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.ati.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.ati.despike(tc,"tc",n=4) tc <- d$x tc.SPIKE <- d$SPIKE } if(BRAND[i] == "RMY") { d <- fun.RMY.despike(u,v,w,tc) u <- d$u v <- d$v w <- d$w tc <- d$tc spike <- d$z3 # test u d <- fun.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.despike(tc,"tc",n=3) tc <- d$x tc.SPIKE <- d$SPIKE } if(BRAND[i] == "CSI") { # test u d <- fun.despike(u,"u",n=4) u <- d$x u.SPIKE <- d$SPIKE # test v d <- fun.despike(v,"v",n=4) v <- d$x v.SPIKE <- d$SPIKE # test w d <- fun.despike(w,"w",n=4) w <- d$x w.SPIKE <- d$SPIKE # test tc d <- fun.despike(tc,"tc",n=5) tc <- d$x tc.SPIKE <- d$SPIKE } ######## Do we need to despike CO2, H2O and O3 - leave for later !!!! ### From Nov16-Dec17, 2010. Was dropping samples (giving line of zero's) ### then despike after getting rid of calibration points! #d <- fun.LImissample(co2,h2o,Zero,Span) #co2 <- d$co2 #h2o <- d$h2o #Zero <- d$Zero #Span <- d$Span cat("\n After despiking") #browser() cat("\n Total u-spikes:", u.SPIKE) cat("\n Total v-spikes:", v.SPIKE) cat("\n Total w-spikes:", w.SPIKE) par(mfrow=c(3,1)) #plot(u,type="l") #plot(w,type="l") plot(tc,type="l") plot(co2,type="l") plot(h2o,type="l") #browser() ubar <- mean(u, na.rm=T) vbar <- mean(v, na.rm=T) wstats <- fun.skewness(w, OUT=F) wbar <- wstats$xbar Tbar <- mean(tc, na.rm=T) Tbar.C <- Tbar + 273.15 ########### Put in planar fit rotation!!!!! if(Rotate==1) { vwRotate <- F # align wind vectors with the mean wind - 10 Hz data ## run-by-run, 2 rotations (Kaimal and Finnigan) d <- fun.coord.rot(u, v, w, ubar, vbar, wbar) u <- d$u v <- d$v w <- d$w theta <- d$theta #theta = u-v rotation phi <- d$phi } if(Rotate==2) { # align wind vectors with the mean wind - 10 Hz data ## run-by-run, 2 rotations (Kaimal and Finnigan) d <- fun.topo.rotate(u, v, w, month) u <- d$u3 v <- d$v3 w <- d$w2 } ## get wbar after rotation (important for planar fit) wbar <- mean(w,na.rm=T) ####### should probably compute u* here from planar fit rotation!!! v.w <- var(v,w, na.rm=T) u.w <- var(u,w, na.rm=T) ustar <- ((u.w)^2 + (v.w)^2) ^ 0.25 ustar <- signif(ustar,d=5) cat("\n ustar:",ustar) ### get proper time lags dday.lag <- i.data[j,1] d.ttag <- ttag[6] - dday.lag ### dont think we need this anymore!!! Syncing earlier. #if(1==0) { # #if(abs(d.ttag) > 0.0002) { # cat("NOT sync'd") ## while(d.ttag > 0.0002) { # m <- m + 1 # dday.lag <- i.data[m,1] # d.ttag <- ttag[6] - dday.lag # } # # while(d.ttag < -0.0002) { # m <- m - 1 # dday.lag <- i.data[m,1] # d.ttag <- ttag[6] - dday.lag # } # # } ## end of if(abs(d.ttag) > 0.0002) # # } ### end of if(1==0) ### Now deal with CO2 and H2O calibration if(CAL == 0) { ### no autocals - use a default calibration coefficient ### before May, 2010, CO2offset = 0, H2O offset = ?? and Scoeff = 0.899 (median) Z.co2 <- 0 Z.h2o <- 0 S.co2 <- 0.899 ## 3/2010, reading ~ 427 ppm, should probably be around 390. 390/427=0.913 ## until we actually calibrate the Li7000 under exp.conditions!!!! 
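## NOTE: on the backup laptop the CO2/H2O signals arrive as analog voltages (Vco2/Vh2o),
## so S.h2o here is presumably a volts-to-mixing-ratio scale factor (4, switching to 6
## after DOY 186.58 in the CAL==1 branch below when the H2O voltage scale was changed)
## rather than 1 as on the flux laptop.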
#S.co2 <- 1 S.h2o <- 4 } else { #### figure out autocalibrations later ### will replace these. #if(i==1) browser() Z.co2 <- Coffset[j] Z.h2o <- Hoffset[j] S.co2 <- Scoef[j] if(month == "test") S.co2 <- 1 Cflag.m <- Calflag[j] ######### change here when changing scales on H2O voltage!!!! S.h2o <- 4 if(ttag[6] >= 186.58) S.h2o <- 6 } #if(i==1 & j==1) browser() ####### Put Ozone calibration in here!!!!!!! ## put in calibrations if(O3flag[i] == 0) { test <- is.na(S.co2) if(test == 1) S.co2 <- median(Scoef,na.rm=T) co2 <- (co2 - Z.co2)*S.co2 } else co2 <- (co2 - Z.co2) ## this is really ozone - will just subtract the offset ## and compute the flux in mV (or Volts?) h2o <- (h2o - Z.h2o)*S.h2o #### if Calibrating - need to replace calibration time with NAs if(CAL == 1) { if(Cflag.m == 1) { ## during zero and span - zero status is high (span can be either) Z.i <- Zero > 2 ## further check for calibrations Zsum <- sum(Z.i,na.rm=T) if(Zsum > 0) { ## Replace calibration data with NAs. co2 <- ifelse(Z.i == 1,NA,co2) h2o <- ifelse(Z.i == 1, NA,h2o) ### would also like to get rid of first 150 points after calibration (15 seconds)... Cindex <- seq(from=1,to=length(co2),by=1) Cindex <- Cindex[Z.i] ## now just the index numbers during calibration Cmax <- max(Cindex,na.rm=T) ## last point test <- length(co2) - Cmax #if(j==792 & month=="aug") browser() if(test >= 150) { Cend <- Cmax + 150 ## get rid of 150 pts after calibration co2[Cmax:Cend] <- NA h2o[Cmax:Cend] <- NA cat("\n Calibration: test time series") } lines((h2o/4),col=2,lty=2) #browser() } } ### end of if(Cflag==1) } ### end of if(Cal==1) ###### now we can despike the CO2 and H2o time series: # test co2 d <- fun.despike(co2,"CO2",n=5) co2 <- d$x # test co2 d <- fun.despike(h2o,"H2O",n=5) h2o <- d$x co2.ppm <- mean(co2,na.rm=T) #h2o.mmol <- mean(h2o,na.rm=T) ########## some constants we need ############################## # Convert Pressure from kPa to kg m^-3 p.bar <- 76 # in kPa (estimated - will get measured value later) rho <- p.bar/(Tbar * 0.287) # R = 0.287 (kPa kg / K m^3) # units are kg m^-3 Vm <- 22.414*(101.3/p.bar)*(Tbar/273.15) cp <- 1004.67 Tbar.C <- Tbar - 273.15 W.avg <- mean(h2o, na.rm=T) e <- (p.bar/1000)*W.avg rhov <- (e*1000)/(Tbar*0.4615) # rhov in g/m^3 rhov <- rhov/1000 # kg/m^3 rhoa <- rho - rhov # kg/m^3 sigma <- rhov/rhoa mu <- 1.6077 Lv <- (2.501 - (0.00237*Tbar.C))*1e6 ### J kg-1 (lat. heat of vapor of H2O) #################################################################### cat("should be sync'd, ready to go through step-by-step!") #if(i==465 & j == 929) browser() ### should be sync'd now. #browser() ################################ ## convert to umole/m^3 if(O3flag[i] ==0) co2 <- co2*(1000/Vm) rhoc <- mean(co2,na.rm=T) ## or mmole/m^3 h2o <- h2o*(1000/Vm) ################################ ############ Calculate Fluxes ############################# ## First heat flux - using planar fit coordinate rotation w.tc <- var(w,tc, na.rm=T) H.pf <- rho*cp*w.tc ########## should detrend both H2O and CO2 time series tstep <- 0.1 ###3 Screwed up here!!! 
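## NOTE: the hard-coded tstep <- 0.1 just above (and the matching line in the flux-laptop
## block earlier) overrides the value set from Reprate[i] (0.05 for 20 Hz files) before
## detrending, which is presumably what the "Screwed up here" flag refers to.  A minimal
## guarded alternative, assuming Reprate[i] is still in scope at this point:
#   tstep <- if(Reprate[i] == 20) 0.05 else 0.1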
################ h2o <- fun.detrend(h2o,tstep) co2 <- fun.detrend(co2,tstep) ####################################################### ## get in # of samples, negative lag = moves time series later tlag.co2 <- i.data[j,5] tlag.h2o <- i.data[j,6] ####tlag.o3 <- i.data[j,7] wLco2 <- lag(w,k=tlag.co2) wLh2o <- lag(w,k=tlag.h2o) ######h2oL <- lag(h2o,k=tlag.h2o) #####o3L <- lag(o3,k=tlag.o3) ## combine everything: WlagCO2 <- cbind(co2,wLco2) WlagH2O <- cbind(h2o,wLh2o) #### calculate latent heat flux (no H-correction) w.q <- var(WlagH2O[,2],WlagH2O[,1],na.rm=T) ##3 water flux, mmol m-2 s-1 w.q.kg <- w.q*(18/1e6) ### kg m-2 s-1 LE <- Lv*(1 + mu*sigma)*w.q.kg ### W m-2 #### calculate CO2 flux (with LE Webb correction) w.c <- var(WlagCO2[,2],WlagCO2[,1],na.rm=T) ### Raw Co2 covariance EcorrF <- (rhoc/rhoa)*(mu/(1+mu*sigma))*LE/Lv ### LE webb corr. Fco2 <- w.c + EcorrF ### final Webb-corr'd flux ### should compute sensible heat flux Webb correction here - just for info ### (can compare to online EOL data easier!) HcorrF <- (rhoc/rho)*(H.pf/(cp*Tbar)) #umol m-2 s-1 ######## Will do Ozone here ########################## ################ Collect terms and round off the data ######## w.q <- signif(w.q,d=5) w.c <- signif(w.c, d=5) LE <- signif(LE, d=4) EcorrF <- signif(EcorrF, d=4) Fco2 <- signif(Fco2,d=5) W.avg <- signif(W.avg,d=5) co2.ppm <- signif(co2.ppm,d=5) H <- i.data[j,3] wbar <- signif(wbar,d=4) ####3 will need some ozone values here cat("\n H=",H) cat("\n LE=",LE) cat("\n F(co2)=",Fco2) cat("\n [CO2]=",co2.ppm) cat("\n [H2O]=",W.avg) cat("\n HcorrF=",HcorrF) cat("\n\n") #browser() ####################### need to gather up data here################################33 tname <- c(ttag,endday, dday.lag, co2.ppm,W.avg,H,LE,Fco2,w.q,w.c,EcorrF,H.pf,wbar,HcorrF,O3flag[i],Cdday.j,ustar) #if(i == 1) browser() M2[j,1:22] <- tname #browser() dput(M2,outdata.bk) k <- endpt + 1 j <- j + 1 } ## end of if(Lsubset > (Npts/N)) else { k <- Ldata ### set k to last pt. to get out of while loop. } } ## end of while(k < Ldata) lastdday <- dday i <- i + 1 } ## end of if(Licor[i] == 1) else { i <- i + 1 ## increment the loop and move on cat("\n No CO2 data!!") j <- j + 1 ## increment j dday.j <- dday1[j] } } ## end of while(i < Lfile) statement cat("\n End of while loop!") #browser() #### At end - get rid of extra rows with NAs in them. #M1 <- M1[1:j,] ### put in quick filter for very crazy flux values: Hpf <- M2[,17] LE <- M2[,12] Fco2 <- M2[,13] flag1 <- ifelse(Fco2 > 20 | Fco2 < -30,1,0) flag2 <- ifelse(LE > 600 | LE < -100,1,0) flag3 <- ifelse(Hpf > 1000 | Hpf < -500,1,0) ##3 now substitute some stuff Fco2 <- ifelse(flag1 == 1,NA,Fco2) wc <- ifelse(flag1 == 1,NA,M2[,15]) LE <- ifelse(flag2 == 1,NA,LE) wq <- ifelse(flag2 == 1,NA,M2[,14]) Hpf <- ifelse(flag3 == 1,NA,Hpf) cat("\n Make some plots - if everthing looks OK - then output") par(mfrow=c(2,1)) plot(M2[,6], M2[,11],type="l",ylab="Sensible Heat") lines(M2[,6],Hpf,col=2,lty=2) cat("\n Black=Kaimal/Finnigan, Red=Planar Fit") plot(M2[,6],Hpf,type="l",ylab="Energy Fluxes") lines(M2[,6],LE,col=2) cat("\n Black=H, Red=LE") browser() ################################################# plot(M2[,6], M2[,9],type="l",ylab="CO2, ppm") plot(M2[,6],M2[,10],type="l",ylab="H2O, mmol mol-1") browser() ################################################ plot(M2[,6], LE,type="l",ylab="LE, W/m^2") plot(M2[,6], Fco2,type="l",ylab="Fco2, umol m-2 s-1") browser() ## now put back in substituted data: M2[,13] <- Fco2 M2[,12] <- LE M2[,14] <- wq M2[,15] <- wc M2[,17] <- Hpf ## then output. 
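## NOTE: M2 has the same 22-column layout as M1 except that column 20 holds O3flag
## (1 = the analog channel carried ozone, so the "CO2" columns are really O3 for that
## period) instead of the sonic dday; the merge step further down uses this flag to
## blank the ozone runs out of the combined CO2 record.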
fun.asci(M2,outdata.bk) ################################################################################################ ################### see M2 for names of columns ############################################### } ######### end of if(SKIP2==F) else { ##### read in the data from previous run ### read in the text file that was computed previously ## read in the output file from the flux laptop D <- scan(outdata.bk,sep=" ",list(year=0,jday=0,hh=0,mm=0,ss=0,tstr=0,tend=0,tlag=0,co2=0,W=0,H=0,LE=0,Fco2=0,wq=0,wc=0,EcorrF=0,Hpf=0,wbar=0,HcorrF=0,O3flag=0,ddayJ2=0,ustar=0)) M2 <- cbind(D$year,D$jday,D$hh,D$mm,D$ss,D$tstr,D$tend,D$tlag,D$co2,D$W,D$H,D$LE,D$Fco2,D$wq,D$wc,D$EcorrF,D$Hpf,D$wbar,D$HcorrF,D$O3flag,D$ddayJ2,D$ustar) cat("\ Read in flux laptop data") } ###################################################################################################### ########## Now combine into final data ###################################################################### cat("\n Have both sets of data now: M1=Flux laptop, M2=BAckup laptop") browser() ################################################################################ ########### Now combine the 2 files into 1 complete data set ################################################################################ ## first - will merge all data into 1 file (M1) ## The backup laptop is usually the most complete - so we will use it as the "default" ## but want to use fluxes from the Flux laptop whenever possible (thse have RS-232 output - better measurement) #######Important - need to remember to look at O3 flag of M2 to see if we are doing O3 or Co2 ####### In this combination file - we only want CO2 fluxes - not O3 fluxes ### Initially get rid of any O3 fluxes from the backup laptop test <- M2[,20] M2[,9] <- ifelse(test==1,NA,M2[,9]) ## CO2 ppm M2[,13] <- ifelse(test==1,NA,M2[,13]) ## F(co2) - with corrections M2[,15] <- ifelse(test==1,NA,M2[,15]) ## w'c' i <- 1 ## index of M1 j <- 1 ## index of M2 k <- 1 ## ?? Lm1 <- length(M1[,1]) Lm2 <- length(M2[,1]) if(MAIN == 1) { Etime <- M2[Lm2,6] tstart <- M2[1,6] Etime2 <- M1[Lm1,6] } else { Etime <- M1[Lm1,6] tstart <- M1[1,6] Etime2 <- M2[Lm2,6] } deltaEtime <- Etime - Etime2 if(abs(deltaEtime) < 0.0008) { ## if less than 1 min. - make the last time tags equal in the 2 arrays M1[Lm1,1:7] <- M2[Lm2,1:7] } if(Etime > Etime2) { ### 2 data sets do not end at the same time: will cause endless loop below if(MAIN==1) M1 <- rbind(M1,M2[Lm2,]) else M2 <- rbind(M2,M1[Lm1,]) } ## end of if(Etime > Etime2) cat("Check length of each file") browser() while(tstart < Etime) { if(tstart == Etime) browser() if(i > Lm1) i <- i - 1 if(j > Lm2) j <- j - 1 ttag1 <- M1[i,6] ttag2 <- M2[j,6] deltaT <- ttag1 - ttag2 dtest <- (N/1440)/2 ## 1/2 of whatever the time averge period is. if(abs(deltaT) < dtest) { ## within 30 min. 
of each other M3row <- M1[i,] ## use Flux laptop as default i <- i + 1 ## increment all files j <- j + 1 #cat("\n tstart",tstart) } else { if(deltaT < 0) { ## then M1 ttag is earlier than M2 - use M1 data M3row <- M1[i,] i <- i + 1 ## only increment M1 cat("\n deltaT < 0", i,j) } if(deltaT > 0) { ## then M1 ttag is much later than M2 - use M2 data M3row <- M2[j,] j <- j + 1 ## increment M3 and M2 cat("\n deltaT > 0", i,j) #if(j==744) browser() } } ## end of else statement if(k == 1) { M3 <- M3row k <- k + 1 } else { M3 <- rbind(M3,M3row) } tstart <- M3row[6] #if(j==1440) browser() #cat("\n tstart:",tstart,"Etime:",Etime) } ## end of while loop cat("\n End of combo loop") ############################################################### fun.asci(M3, FinalData) browser() ##### now use M1 to get some diurnal averages. ### make an hrmin vector hrmin <- M1[,3]/100 + M1[,4]/60 ## get hrmin (0.0, 0.5, 1.0,... each 1/2 hr) hrmin <- round(hrmin,d=1) hrmin <- ifelse(hrmin > 23.75,0.0,hrmin) M4 <- cbind(M1[,6],hrmin,M1[,9:13],M1[,17]) WIND <- F DielAvg <- fun.diurnalavg(M4,WIND) par(mfrow=c(2,1)) ### plot diurnal medians. plot(DielAvg[,1],DielAvg[,8],type="o",ylab="CO2, ppm") plot(DielAvg[,1],DielAvg[,9],type="o",ylab="H2O, mmol mol-1") browser() plot(DielAvg[,1],DielAvg[,10],type="o",ylab="H") plot(DielAvg[,1],DielAvg[,11],type="o",ylab="LE") browser() plot(DielAvg[,1],DielAvg[,12],type="o",ylab="F(CO2) umol m-2 s-1") plot(DielAvg[,1],DielAvg[,10],type="o",ylab="Energy fluxes") lines(DielAvg[,1],DielAvg[,11],col=2,pch=4) cat("\n End of plots - do we want to output diurnal data??") browser() ######## Do we want to output this !!!! ### end of function } ##############3 subroutines ######################## fun.ati.despike <- function(x, name, n) { #### For ATI sonic only - gets rid of -99.99 values # Subroutine which takes a time series, x, with a given "name" # and filters out any obvious spikes in the data. # Can be used for sonic data(name = "u","v",etc), CO2 data (name = # "co2","h2o","t.ec",etc), Kr Hygrometer data (name = "h2o.kr"), etc. # The filter used is a essentially n*standard deviation. Anything # values that fall outside of n*sigma will be replaced with the # median value. ### get rid of -99.99 values ##### we will put in the median value of x test <- x < -90 z1 <- sum(test,na.rm=T) x <- ifelse(test == T, median(x,na.rm=T),x) # Define the filter - Our cut-off : y <- n*(var(x, na.rm=T)^0.5) # Look at positive spikes first : test.1 <- x > (median(x, na.rm=T) + y) z2 <- sum(test.1,na.rm=T) if(z2 > 0) { cat(" :\n replacing ", z2) cat(" (+) spikes in ",name) x <- ifelse(x > median(x, na.rm=T)+y, NA, x) } # Look at negative spikes : test.1 <- x < (median(x, na.rm=T) - y) z3 <- sum(test.1,na.rm=T) if(z3 > 0) { cat(" :\n replacing ", z3) cat(" (-) spikes in ",name) x <- ifelse(x < median(x, na.rm=T)-y, NA, x) } #### get the sum of all the spikes: SPIKE <- z1 + z3 + z2 # Generate a flag which tells when greater than 5% of the time series # has spikes in it. 
# per.spike <- (z2 + z3)/ length(x) # if(per.spike>0.04) { # SPIKE <- F } # else { # SPIKE <- T } # return list of filtered x and SPIKE flag list(x=x, SPIKE=SPIKE) } ######################################################################## fun.windstuff <- function(u,v,w, bearing) { ## calculate wind speed and direction # (1) calculate scalar wind speed ###ws <- (u^2 + v^2 + w^2)^0.5 ws <- (u^2 + v^2)^0.5 U <- mean(ws,na.rm=T) sdU <- var(ws, na.rm=T)^0.5 #### Simple average wind direction: ubar <- mean(u,na.rm=T) vbar <- mean(v, na.rm=T) wd2 <- atan(vbar/ubar)*(180/pi) ## +/- 90 degrees; relative to sonic wd2 <- ifelse(ubar >= 0, 360 - wd2, 180 - wd2) wd2 <- ifelse (wd2 >= 360, wd2-360, wd2) # now in 0-360 deg. relative to sonic ### put in bearing of sonic # correct for original sonic orientation wd2 <- wd2 + bearing wd2 <- ifelse (wd2 >= 360, wd2-360, wd2) ####################################3 #### method #3 for wind direction: theta <- atan(ubar/vbar)*(180/pi) ### get the correct hemisphere: wd3 <- ifelse(vbar > 0, (270 + theta),(90 + theta)) ### put in sonic bearing wd3 <- wd3 + bearing wd3 <- ifelse(wd3 > 360, wd3 - 360,wd3) # (2) original wind direction calculation d <- atan(v/u)*180/pi d <- ifelse(u >= 0, 360 - d, 180 - d) d <- ifelse (d >= 360, d-360, d) ### now 0-360 scale with respect to sonic # correct for original sonic orientation d <- d + bearing d <- ifelse (d >= 360, d-360, d) ### Jan. 2001 - this is the correct way to get average wind direction. wd <- d*(pi/180) # get into radians Ux <- (mean(sin(wd),na.rm=T)) Uy <- (mean(cos(wd),na.rm=T)) wdir <- (atan(Ux/Uy))*(180/pi) ## Now get the proper quadrant : if(Uy < 0) wdir <- wdir + 180 if(Uy > 0) wdir <- ifelse(wdir < 0, 360+wdir, wdir) Udir <- wdir ## Vector Wind speed : Ue <- mean((ws*sin(wd)),na.rm=T) Un <- mean((ws*cos(wd)),na.rm=T) Vws <- (Ue^2 + Un^2)^0.5 # New way of getting std. dev. of wind direction ## From Campbell 23x book - Yamartino algorithm (pp11-2) epsil <- ( 1 - (Ux^2 + Uy^2))^.5 sdUdir <- asin(epsil)*(1 + (0.1547*epsil^3)) sdUdir <- sdUdir*(180/pi) list(U=U,Udir=Udir,Vws=Vws,sdU=sdU,sdUdir=sdUdir, wd2=wd2, wd3=wd3) } ############################################################### fun.skewness <- function(x,OUT=F) { ## function designed to take a time series of x and ## calculate the mean, median, std. dev., skewness and ## kurtosis. # get rid of NAs for the rest : xx <- is.na(x) x <- x[xx==F] N <- length(x) xbar <- mean(x,na.rm=T) xmed <- median(x,na.rm=T) sdx <- var(x, na.rm=T)^.5 Skx <- mean((x - mean(x))^3) / (sdx^3) Krx <- mean((x - mean(x))^4) / (sdx^4) if(OUT==T) { cat("\n N = ",N) cat("\n xbar = ",xbar) cat("\n xmed = ",xmed) cat("\n sdx = ",sdx) cat("\n Skx = ",Skx) cat("\n Krx = ",Krx) } list(N=N,xbar=xbar,xmed=xmed,sdx=sdx,Skx=Skx,Krx=Krx) #invisible() # end function } ################################################################ fun.coord.rot <- function(u, v, w, ubar, vbar, wbar) # # This function performs a 3-D coordinate rotation on the time # series u, v, and w, after Kaimal and Finnigan, "Atmospheric # Boundary Layer Flows", (1994), pg 236. # # This aligns the resulting time series, u3, v3, w3, with the mean # wind vector, forcing v3 and w3 means to zero. # # Returns a list including the rotated u, v, w time series, and # the angles of rotation. 
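# NOTE: theta rotates about the vertical axis so the mean cross-wind component goes to
# zero; phi then rotates about the new lateral axis so the mean vertical velocity goes
# to zero.  Only these two rotations are applied ("2 rotations" in the calling code);
# the optional third rotation that forces v'w' to zero is not performed.
# Minimal usage sketch (illustrative only):
#   r <- fun.coord.rot(u, v, w, mean(u, na.rm=T), mean(v, na.rm=T), mean(w, na.rm=T))
#   mean(r$v, na.rm=T); mean(r$w, na.rm=T)    # both ~0 after rotation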
{ ### atan == inverse tangent - returns value in radians theta <- atan( vbar / ubar ) u2 <- u*cos(theta) + v*sin(theta) v <- v*cos(theta) - u*sin(theta) phi <- atan( wbar / mean(u2, na.rm=T) ) u <- u2*cos(phi) + w*sin(phi) w <- w*cos(phi) - u2*sin(phi) ## return a list of rotated time series list(u=u, v=v, w=w, theta=theta, phi=phi) } ########################################################### fun.length.scale <- function(x, U, deltat) { ## function designed to calculate the ## Eulerian time and length scale from a time series ### first - replace all the NA's in a time series with the mean value. ### (we assume there are not many NAs) p <- is.na(x) x2 <- ifelse(p==T,mean(x,na.rm=T),x) x <- x2 acv <- acf(x, "correlation", lag=500, plot=F) ## note - if R gives lags in samples then don't need deltat ## if lags are in # samples - then need deltat!!! Tw <- deltat*sum(acv$acf) #Tw <- sum(acv$acf) Lw <- abs(U*Tw) ## combine the terms y <- c(Tw,Lw) y } ############################################################## fun.aeroR <- function(U,ustar,z.L,name,method) { ## function to calculate aerodynamic and sublaminar resistance ## species for Rb = determined by "name" ln ## need some constants for Niwot Ridge z0 <- 1.7 # roughness, m, Niwot ridge LAI <- 4.1 # LAI, m2 m-2, Niwot Ridge Ln <- 0.001 # needle width (m) Dh2o <- 0.227e-4 # Diff. coeff. of water, m2 s-1 nu <- 1.461e-5 # viscosity of air (m2 s-1) ### first -need stability functions: N <- (1 - (16*z.L))^.25 N2 <- 2*(log((1 + N^2)/2)) ## if neutral or stable: phiH = 5.2*z.L, otherwise N2 (Dyer, 1976) phiH <- ifelse(z.L > -0.1,(5.2*z.L),N2) N3 <- 2*((1 + N)/2) + log((1 + N^2)/2) - 2*atan(N) + 3.414/2 phiM <- ifelse(z.L > -0.1,(5.2*z.L),N3) ## now calculate Ra Ra <- (U/ustar^2) - (phiH - phiM)/(0.4*ustar) ## now for Rb ## need diffusion coefficients for species: if(name == "O3") Di <- 0.1385e-4 # m2 s-1 if(name == "PAN") Di <- 0.86e-5 if(name == "HNO3") Di <- 0.118e-4 if(name == "heat") Di <- 0.142e-4 # Reynolds number Re <- ustar*z0/nu # Schmidt number Sc <- nu/Di if(method == 0) Rb <- (1.45/ustar)*(Re^0.24)*(Sc^0.8) if(method == 1) { JH <- ((100*Ln*ustar)/((LAI^2)*nu))^.3333 Rb <- Sc*JH/ustar } if(method == 2) Rb <- (2/0.4*ustar)*(Sc/0.72)^0.6666 if(method == 3) { Me <- (U*Ln/nu)^0.5 Rb <- Ln/(Di*0.66*Me*(Sc^0.333)) } #browser() list(Ra=Ra,Rb=Rb) } ########################################################## fun.timetag <- function(t1) { # function to convert a POSIXct object to a numeric string of the type: # Julian day, hour of day, min of day, sec # output as a vector of numbers: year <- strftime(t1, "%Y") year <- (as.numeric(year)) jday <- strftime(t1,"%j") jday <- as.numeric(jday) hday <- strftime(t1,"%H") hday <- (as.numeric(hday))*100 mday <- strftime(t1, "%M") mday <- as.numeric(mday) Sday <- strftime(t1, "%S") Sday <- as.numeric(Sday) ## put in to vector ttag <- c(year,jday,hday,mday,Sday) } ################################################## fun.asci <- function(x,data.file) { ## function to output an asci file y <- matrix(NA,nrow=length(x[1,]),ncol=length(x[,1])) i <- 1 while(i <= length(x[1,])) { y[i,] <- x[,i] i <- i+1 } ### Output data: write(y,data.file,ncol=length(x[1,])) } ################################################################# fun.despike <- function(x, name, n) { # Subroutine which takes a time series, x, with a given "name" # and filters out any obvious spikes in the data. # Can be used for sonic data(name = "u","v",etc), CO2 data (name = # "co2","h2o","t.ec",etc), Kr Hygrometer data (name = "h2o.kr"), etc. 
# The filter used is a essentially n*standard deviation. Anything # values that fall outside of n*sigma will be replaced with the # median value. #browser() # Define the filter - Our cut-off : y <- n*(var(x, na.rm=T)^0.5) # Look at positive spikes first : test.1 <- x > (median(x, na.rm=T) + y) z2 <- sum(test.1,na.rm=T) if(z2 > 0) { cat(" :\n replacing ", z2) cat(" (+) spikes in ",name) x <- ifelse(x > median(x, na.rm=T)+y, NA, x) } # Look at negative spikes : test.1 <- x < (median(x, na.rm=T) - y) z3 <- sum(test.1,na.rm=T) if(z3 > 0) { cat(" :\n replacing ", z3) cat(" (-) spikes in ",name) x <- ifelse(x < median(x, na.rm=T)-y, NA, x) } #### get the sum of all the spikes: SPIKE <- z3 + z2 #browser() # Generate a flag which tells when greater than 5% of the time series # has spikes in it. # per.spike <- (z2 + z3)/ length(x) # if(per.spike>0.04) { # SPIKE <- F } # else { # SPIKE <- T } # # return list of filtered x and SPIKE flag list(x=x, SPIKE=SPIKE) } ######################################################################## fun.zero.despike <- function(u,v,w,tc) { # Subroutine used in early 2008 # getting spikes where all values of u,v,w, and tc were equal to zero. # this removes those and replaces with median ### first look for when tc == 0 test1 <- u == 0 & v == 0 & w == 0 z3 <- sum(test1,na.rm=T) if(z3 > 0) { cat(" :\n replacing ", z3,"zero spikes") u <- ifelse(test1==T, NA, u) v <- ifelse(test1==T, NA, v) w <- ifelse(test1==T, NA, w) tc <- ifelse(test1==T, NA, tc) } # return list of filtered x and SPIKE flag list(u=u, v=v, w=w, tc=tc, z3=z3) } ############################################################################# fun.RMY.despike <- function(u,v,w,tc) { # Subroutine used in early 2008 # getting spikes where all values of u,v,w, and tc were equal to zero. # this removes those and replaces with median ### first look for when tc >= 350 test1 <- tc >= 350 z3 <- sum(test1,na.rm=T) if(z3 > 0) { cat(" :\n replacing ", z3,"Tc spikes") u <- ifelse(test1==T, NA, u) v <- ifelse(test1==T, NA, v) w <- ifelse(test1==T, NA, w) tc <- ifelse(test1==T, NA, tc) } ### first look for when w >= 30 test2 <- w >= 30 z4 <- sum(test1,na.rm=T) if(z4 > 0) { cat(" :\n replacing ", z4,"w spikes") u <- ifelse(test2==T, NA, u) v <- ifelse(test2==T, NA, v) w <- ifelse(test2==T, NA, w) tc <- ifelse(test2==T, NA, tc) } # return list of filtered x and SPIKE flag list(u=u, v=v, w=w, tc=tc, z3=z3) } ############################################################################# fun.roughness <- function(z,z.displ,Ubar,ustar) { ## function to calculate aerodynamic roughness length from single height ## sonic measurements - really only valid in neutral conditions k <- 0.4 #von Karman constant Q <- exp(Ubar*k/ustar) zo <- (z - z.displ)/Q ### units should be meters! zo } ############################################################################ ########################################################## fun.eddydiffusity <- function(ustar,z.L1,z.L2,z1,z2) { ### subroutine to calculate the eddy diffusivity for flux gradient measurements ### uses 2 gradient heights (z1 and z2, in m with z1 > z2) ### also needs z/L at both heights and u* (from 1 height or taken as avg. of both heights) ### eqns. are directly from Edwards et al., JGR 110, D10306, 2005, but can be found in many ### places (maybe in slightly varying forms). ### stability functions are from vKc <- 0.4 # von Karmans constant ########## calculate integrated stability functions at each height. 
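## NOTE: the integrated stability corrections below follow Dyer (1976): unstable,
## psi_H = 2*log((1 + x^2)/2) with x = (1 - 16*z/L)^0.25; stable, psi_H = -5.2*z/L;
## near-neutral (|z/L| <= 0.15) set to zero.  The exchange coefficient is then
## K = (k*ustar)/(log(z1/z2) - psi1 + psi2).  Note that the second ifelse for phiH2
## tests z.L1; z.L2 was presumably intended there.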
##########################################################
fun.eddydiffusity <- function(ustar, z.L1, z.L2, z1, z2) {
  ### subroutine to calculate the eddy diffusivity for flux-gradient measurements
  ### uses 2 gradient heights (z1 and z2, in m, with z1 > z2)
  ### also needs z/L at both heights and u* (from 1 height or taken as avg. of both heights)
  ### eqns. are directly from Edwards et al., JGR 110, D10306, 2005, but can be found in many
  ### places (maybe in slightly varying forms).
  ### stability functions follow the same Dyer (1976) forms used in fun.aeroR above
  vKc <- 0.4   # von Karman constant
  ########## calculate integrated stability functions at each height
  ### height 1:
  N  <- (1 - (16*z.L1))^.25
  N2 <- 2*(log((1 + N^2)/2))
  ## if stable (z/L > 0.15): phiH = -5.2*z/L; if unstable (z/L < -0.15): N2; otherwise neutral (0)
  phiH1 <- ifelse(z.L1 > 0.15, (-5.2*z.L1), 0)
  phiH1 <- ifelse(z.L1 < -0.15, N2, phiH1)
  ### height 2:
  N  <- (1 - (16*z.L2))^.25
  N2 <- 2*(log((1 + N^2)/2))
  phiH2 <- ifelse(z.L2 > 0.15, (-5.2*z.L2), 0)
  phiH2 <- ifelse(z.L2 < -0.15, N2, phiH2)
  ###################################################################
  P <- log(z1/z2)
  K <- (vKc*ustar)/(P - phiH1 + phiH2)   #### actually this is Kedd/d(z)
  delta.z <- z1 - z2
  Kedd <- K*delta.z
  Cneut  <- (vKc*ustar)/log(z1/z2)
  Kneut1 <- (vKc*ustar)*z1
  Kneut2 <- (vKc*ustar)*z2
  list(Kedd=Kedd, K=K, Cneut=Cneut, Kneut1=Kneut1, Kneut2=Kneut2)
}
######################################################
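## Usage sketch (illustrative heights and values only, not from the original code):
## in near-neutral conditions the layer diffusivity reduces to roughly
## vKc*ustar*(z1 - z2)/log(z1/z2):
if(FALSE) {
  Kd <- fun.eddydiffusity(ustar=0.4, z.L1=0.01, z.L2=0.01, z1=16, z2=8)
  Kd$Kedd    # eddy diffusivity (m2 s-1) for the z2 -> z1 layer
  Kd$Cneut   # neutral-limit transfer coefficient, for comparison
}
##############################################################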
subroutine") #if(i == 0) browser() ### put into a row of output matrix if(WIND == F) {Y[j,] <- c(i,N,xbar,xmed,xstd,xsterr) } else { Y[j,] <- c(i,N,xbar[1],Udir,xbar[3:Lxbar],xmed,xstd[1],sdUdir,xstd[3:Lxbar],xsterr[1],sderrUdir,xsterr[3:Lxbar]) } ########################### ## now move to the next 1/2 hour i <- i + 0.5 j <- j + 1 } ## end of the while loop ## output Y Y } # end function ########################################################################## ###################################################### fun.Ltimetag <- function(t1) { # function to convert a POSIXct object to a numeric string of the type: # Julian day, hour of day, min of day, sec # output as a vector of numbers: year <- strftime(t1, "%Y") year <- (as.numeric(year)) jday <- strftime(t1,"%j") jday <- as.numeric(jday) hday <- strftime(t1,"%H") hday <- (as.numeric(hday))*100 mday <- strftime(t1, "%M") mday <- as.numeric(mday) Sday <- strftime(t1, "%S") Sday <- as.numeric(Sday) ## put in to vector ttag <- cbind(year,jday,hday,mday,Sday) } ################################################## ######################################################################## fun.crosscorrelation <- function(x,y,tlag, CCplot) { #browser() a <- ccf(x,y,PLOT=F,lag.max=tlag) CClags <- a[[4]] ######### using absolute value - all R-values are positive now!! Rcoef <- a[[1]] par(mfrow=c(1,1)) if(CCplot==T) { plot(CClags,Rcoef,type="o") } ######## get the covariance between x and y to see the direction of the flux: x_y <- var(x,y,na.rm=T)^.5 if(x_y > 0) { Rmax <- signif(max(Rcoef),d=5) tmax <- CClags[Rcoef==max(Rcoef)] } else { Rmax <- signif(min(Rcoef),d=5) tmax <- CClags[Rcoef==min(Rcoef)] } # Output the time, lag times a # # echo to screen #cat("\n t(Rmin) :", tmin ) #cat("\n t(Rmax) : ", tmax ) #cat("\n Rmin : ", signif(Rmin,d=5)) #cat("\n Rmax : ",signif(Rmax,d=5)) #cat("\n\n") # return list of values list(Rmax=Rmax,tmax=tmax) } ######################################################## ######################################################## fun.Addminute <- function(t1) { ### function to "round" a time tag to the next minute ### t1 = vector of yy,jday,hrx100,min,sec,DOY ### Assume we want to round up to the next minute: t1[5] <- 0 t1[4] <- t1[4] + 1 cat("\n 59 sec alarm!") if(t1[4] == 60) { t1[4] <- t1[4] - 60 t1[3] <- t1[3] + 100 cat("\n hour alarm!") if(t1[3] == 2400) { t1[3] <- t1[3] - 2400 t1[2] <- t1[2] + 1 cat("\n day alarm!") ### until Dec. 31 - won't worry about changing years. } } ## output t1 t1 ######## end of function } ##################################################################################### fun.topo.rotate <- function(u,v,w,month) { ## Rotation designed to rotate sonic velocity coefficients using ## the planar fit method of Wilczak et al., BLM, 99 (2001), 127. ## 1st rotates u, v and w to a plane derived from multiple regression ## of w.raw vs. u.raw and v.raw. From these coefficients - transform ## matrix is formed to do rotation around v-axis (u-w plane) and then ## around u-axis (v-w plane). u and v-axis are the raw sonic coordinates ##########################################################3 ## Mult. regression taken from data : Jan. 2001-July, 2001 ## N = 9659 ## get good regression fit : R^2 = 0.975 ########### For Niwot Ridge ############################# ## alpha <- rotation around v-axis ## = 0.039794 rads (= 2.28 deg.) ## beta <- rotation around u-axis ## = 0.09285 rads (= 5.32 deg.) 
fun.topo.rotate <- function(u, v, w, month) {
  ## Rotation designed to rotate the sonic velocity components using
  ## the planar fit method of Wilczak et al., BLM, 99 (2001), 127.
  ## First rotates u, v and w into the plane derived from a multiple regression
  ## of w.raw vs. u.raw and v.raw. From these coefficients a transform
  ## matrix is formed to do a rotation around the v-axis (u-w plane) and then
  ## around the u-axis (v-w plane). The u and v axes are the raw sonic coordinates.
  ##########################################################
  ## Mult. regression taken from data: Jan. 2001 - July 2001
  ## N = 9659; good regression fit: R^2 = 0.975
  ########### For Niwot Ridge #############################
  ## alpha = rotation around v-axis = 0.039794 rad (= 2.28 deg.)
  ## beta  = rotation around u-axis = 0.09285 rad (= 5.32 deg.)
  ## w-offset = intercept of regression = -0.0052
  ##########################################################
  ########### For Manitou #############################
  ### 2010 ######################
  ### (note: the 2011 "nov" block further down carries the same month string and,
  ###  since these ifs run in sequence, its values override this block)
  if(month=="nov") {
    alpha <- 3.222
    beta <- 1.378
    offset <- 0.000038
  }
  if(month=="dec") {
    alpha <- 3.222
    beta <- 1.378
    offset <- 0.000038
  }
  ### 2011 #####################
  if(month=="jan") {
    alpha <- 3.202
    beta <- 2.046
    offset <- -0.019   ### from Backup data
    ### from Flux Laptop: alpha=3.147, beta=2.065, woff=-0.0186
  }
  if(month=="feb") {
    alpha <- 3.179
    beta <- 2.017
    offset <- -0.0176
  }
  if(month=="mar") {
    alpha <- 3.285
    beta <- 2.249
    offset <- -0.0105
  }
  if(month=="apr") {
    alpha <- 3.134
    beta <- 2.239
    offset <- -0.0197   # input on 5/11/11
  }
  if(month=="may") {
    alpha <- 3.141
    beta <- 2.433
    offset <- -0.0206
  }
  if(month=="jun") {
    alpha <- 3.110
    beta <- 2.302
    offset <- -0.01126
  }
  if(month=="jul" | month == "julB") {
    alpha <- 3.036
    beta <- 2.354
    offset <- -0.0158
  }
  if(month=="aug" | month == "augB") {
    #cat("\n not quite right on Aug PF")
    alpha <- 3.736
    beta <- 1.827
    offset <- -0.00014
  }
  if(month=="sep") {
    alpha <- 3.737
    beta <- 1.863
    offset <- -0.0653
  }
  if(month=="oct") {
    alpha <- 3.095
    beta <- 2.146
    offset <- -0.0196
  }
  if(month=="nov") {
    alpha <- 3.028
    beta <- 2.072
    offset <- -0.0181
  }
  ###################################
  ## Convert angles to radians:
  alpha <- alpha/57.2958
  beta <- beta/57.2958
  w <- w - offset
  u2 <- cos(alpha)*u + sin(alpha)*sin(beta)*v - sin(alpha)*cos(beta)*w
  v2 <- cos(beta)*v + sin(beta)*w
  w2 <- sin(alpha)*u - sin(beta)*cos(alpha)*v + cos(alpha)*cos(beta)*w
  ### Now do the final rotation around the w-axis (gamma)
  ubar <- mean(u2, na.rm=T)
  vbar <- mean(v2, na.rm=T)
  gamma <- atan(vbar/ubar)
  u3 <- cos(gamma)*u2 + sin(gamma)*v2
  v3 <- -1*sin(gamma)*u2 + cos(gamma)*v2
  ## return a list of rotated components
  list(u3=u3, v3=v3, w2=w2, gamma=gamma)
  # end of subroutine
}
##########################################################
fun.detrend <- function(x, tstep) {
  # function to linearly detrend a time series (actually just a vector)
  Lx <- length(x)
  avg.per <- Lx*tstep   # length of data (s)
  index <- seq(from=tstep, to=avg.per, by=tstep)
  x.lm <- lm(x ~ index)
  b <- x.lm$coef[1]
  m <- x.lm$coef[2]
  xcalc <- b + m*index
  ## return the detrended series
  x <- x - xcalc
  x
}
##########################################################
fun.LImissample <- function(co2, h2o, Zero, Span) {
  ## subroutine to find Li7000 data lines that are all 0.00's
  ## and replace them with NA to begin with.
  p <- co2 == 0.0 & h2o == 0.0
  NoMiss <- sum(p, na.rm=T)
  co2  <- ifelse(p==T, NA, co2)
  h2o  <- ifelse(p==T, NA, h2o)
  Zero <- ifelse(p==T, NA, Zero)
  Span <- ifelse(p==T, NA, Span)
  cat("\n # of Li7000 missed samples:", NoMiss)
  ### return list:
  list(co2=co2, h2o=h2o, Zero=Zero, Span=Span)
}
##########################################################
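## Usage sketch (assumed inputs, not from the original code): planar-fit rotate a
## July record with the fixed monthly coefficients above, then linearly detrend
## the rotated vertical wind at the 10 Hz time step:
if(FALSE) {
  rot <- fun.topo.rotate(u, v, w, month="jul")
  w.d <- fun.detrend(rot$w2, tstep=0.1)
}
##########################################################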