fun.Montes_FileTest <- function(EXP=2) {
### function to split up a huge raw data file from Montseny/Monegros
## EXP == 1 (Montseny, April), EXP == 2 (Monegros, July)

### April data
if(EXP==1) { ## Montseny, April Exp.
  infile <- "i:/MONTES010/Montseny/MontsenyData6.son"
  ## scan() returns a list with one vector per field named below
  d1 <- scan(infile,sep="\t",what=list(date="",msec=0,u=0,v=0,w=0,tc=0,A1=0,A2=0,A3=0,A4=0,Err=0,junk=0))
  ### parse the data
  date1   <- as.ts(d1[[1]])
  msec    <- as.ts(d1[[2]])
  PARdir  <- as.ts(d1[[8]])
  PARdiff <- as.ts(d1[[9]])
  Rnet    <- as.ts(d1[[10]])
  ### sonic time series:
  u   <- as.ts(d1[[3]])
  v   <- as.ts(d1[[4]])
  w   <- as.ts(d1[[5]])
  tc  <- as.ts(d1[[6]])
  q   <- as.ts(d1[[7]])
  Err <- as.ts(d1[[11]])
}

if(EXP==2) { ### Monegros, July data
  infile <- "i:/MONTES010/July Data/Monegros_july/MonegrosData5.son"
  ## note the different column order here (Err comes right after tc)
  d1 <- scan(infile,sep="\t",what=list(date="",msec=0,u=0,v=0,w=0,tc=0,Err=0,A1=0,A2=0,A3=0,A4=0,junk=0))
  ### parse the data
  date1   <- as.ts(d1[[1]])
  msec    <- as.ts(d1[[2]])
  PARdir  <- as.ts(d1[[9]])
  PARdiff <- as.ts(d1[[10]])
  Rnet    <- as.ts(d1[[11]])
  ### sonic time series:
  u   <- as.ts(d1[[3]])
  v   <- as.ts(d1[[4]])
  w   <- as.ts(d1[[5]])
  tc  <- as.ts(d1[[6]])
  q   <- as.ts(d1[[8]])
  Err <- as.ts(d1[[7]])
}

Ldata <- length(date1)

######### check the date format in raw data files!!!
date1 <- strptime(date1, "%m/%d/%Y %H:%M:%S")
date1 <- fun.Ltimetag(date1)

## get decimal day (cols of date1: year, jday, hour*100, min, sec):
dday <- date1[,2] + (date1[,3]/2400) + (date1[,4]/1440)
hour.tag <- date1[,3]/100   ## get hour of day
dday <- signif(dday,digits=7)
date1 <- cbind(date1,dday)  ## add decimal day to date

Data <- cbind(dday,msec,u,v,w,tc,q,PARdir,PARdiff,Rnet,Err)

## want to get the difference between each hour:
d.hour <- diff(hour.tag)
d.hour <- c(1,d.hour)  ## prepend a 1 so the lengths match up
index <- seq(from=1,to=Ldata,by=1)
test <- abs(d.hour) >= 1
I.hour <- index[test]  ## indices where the hour changes
Lhour <- length(I.hour)

i <- 1
j <- i + 1

#### Main Loop begin:
while(i < length(I.hour)) {
  cat("\n Loop #:",i)
  cat("\n\n")
  ### get data between point index values: i and j
  Istart <- I.hour[i]
  Iend <- I.hour[j]
  #if(i == 1) browser()
  Data.i <- Data[Istart:Iend,]
  dday.i <- dday[Istart:Iend]
  hour.i <- hour.tag[Istart:Iend]
  test.i <- sum(test[Istart:Iend],na.rm=T)
  test2 <- length(Data.i[,1])
  cat("\n Hour:",mean(hour.i,na.rm=T))
  cat("\n s(Hour):",var(hour.i,na.rm=T))
  cat("\n Sum of Test:",test.i)
  cat("\n Length of file:",test2)
  cat("\n\n")
  cat("\n Ready to output file - Give: File.i <- filename")
  cat("\n\n")
  #### File.i <- "i:/MONTES010/July Data/Monegros_july/Monegros20100713_1747.son"
  browser()  ## will manually give filename depending upon hour of data
  if(length(dday.i) > 100) fun.asci(Data.i,File.i)
  i <- j
  j <- j + 1
} ## end of while loop

cat("\n end of datafiles")
browser()
### end of function
}

########################################################################################
############## subroutines ########################

fun.despike <- function(x, name, n) {
# Subroutine which takes a time series, x, with a given "name"
# and filters out any obvious spikes in the data.
# Can be used for sonic data (name = "u","v",etc), CO2 data (name =
# "co2","h2o","t.ec",etc), Kr Hygrometer data (name = "h2o.kr"), etc.
# The filter is essentially an n*standard-deviation cut-off: any values
# that fall outside of median +/- n*sigma are replaced with the median
# value.
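#
# A minimal usage sketch (hypothetical series and threshold; the
# 6-sigma cut-off is just an illustrative choice, not prescribed here):
#   res <- fun.despike(w, "w", 6)
#   w   <- res$x                 # the despiked series
#   if(!res$SPIKE) cat("\n more than 4% spikes - flag this period")
#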
# Define the filter - our cut-off:
y <- n*(var(x,na.rm=T)^0.5)

# Look at positive spikes first:
test.1 <- x > (median(x,na.rm=T) + y)
z2 <- sum(test.1,na.rm=T)
if(z2 > 0) {
  cat(" :\n replacing ", z2)
  cat(" (+) spikes in ",name)
  x <- ifelse(x > median(x,na.rm=T)+y, median(x,na.rm=T), x)
}

# Look at negative spikes:
test.1 <- x < (median(x,na.rm=T) - y)
z3 <- sum(test.1,na.rm=T)
if(z3 > 0) {
  cat(" :\n replacing ", z3)
  cat(" (-) spikes in ",name)
  x <- ifelse(x < median(x,na.rm=T)-y, median(x,na.rm=T), x)
}

# Generate a flag (SPIKE == F) when more than 4% of the time series
# consists of spikes.
per.spike <- (z2 + z3)/length(x)
if(per.spike > 0.04) {
  SPIKE <- F
} else {
  SPIKE <- T
}

# return list of filtered x and SPIKE flag
list(x=x, SPIKE=SPIKE)
}

########################################################################

fun.fastplot <- function(t,x,Label) {
# subroutine to plot profiles - can change xax and yax
## t = time tag matrix - has six columns with six time tags
## x = matrix with 6 levels (columns 1-6, levels 1-6)
## Label = characters for ylabel
p <- is.na(x[,1])
ind <- seq(from=1,to=length(p),by=1)
samples <- ind[p==0]
I.start <- min(samples,na.rm=T)
I.end <- max(samples,na.rm=T)
xax <- c(t[I.start,1],t[I.end,1])
y.min <- min(x,na.rm=T)
y.max <- max(x,na.rm=T)
yax <- c(y.min,y.max)
#browser()
plot(t[,1],x[,1],type="l",xlim=xax,ylab=Label,ylim=yax)
lines(t[,2],x[,2],col=2)
lines(t[,3],x[,3],col=3)
lines(t[,4],x[,4],col=4)
lines(t[,5],x[,5],col=5)
lines(t[,6],x[,6],col=6)
#### create legend
leg.names <- c("z=2m","z=10.9m","z=16.7m","z=23.9m","z=30.3m","z=39.7m")
#colors <- c("black","red","green","blue","cyan","magenta")
#legend(locator(1),leg.names,col=c(1,2,3,4,5,6),fill=T)
#legend(5,1,leg.names,col=c(1,2,3,4,5,6),fill=T)
text(locator(1),"black=2m, red=10.9m, grn=16.7m, blue=23.9m, cyan=30.3m, mag.=39.7m")
}

##################################################

fun.asci <- function(x,data.file) {
## function to output a matrix x to an ASCII file, one row of x per line
y <- t(x)  ## transpose, since write() walks a matrix column-wise
### Output data:
write(y,data.file,ncolumns=length(x[1,]))
}

######################################################################

fun.Camptimetag <- function(day,hrmin,sec) {
# Converts Campbell-type time tags (day, hhmm, sec) into decimal-day
# time tags.
# note - we assume that we know the year!!!!
## get hours and minutes separated
hr <- floor(hrmin/100)                         ## hour of day
mins <- 100*((hrmin/100) - floor(hrmin/100))   ## minutes
## put everything in days and add it up:
min.sec <- mins + sec/60
hr.min <- hr + min.sec/60
ttag <- day + hr.min/24
ttag <- signif(ttag,digits=8)
# output ttag
ttag
}

###############################################################

fun.linearextrap <- function(index,x,xx,yy) {
## subroutine to take a time series (x) and an index (0->length)
## and a set of index pts/calibration points (xx and yy) and create a
## time series of calibration constants.
### does linear interpolation between successive xx points (and holds
### the last value beyond the final point).
### xx == vector of indices
### yy == corresponding signals (for zeros) or gain coeff. (for spans)
cat("\nTest linear extrap")
#browser()

### do a line fit between each pair of points:
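## For example (hypothetical numbers): with calibration indices
## xx = c(1,500,1000) and zero offsets yy = c(0.10,0.12,0.11), the
## loop below does two fits, y = Int + m*index, and fills
##   x[1:500]    from the (1,0.10)-(500,0.12) line,
##   x[500:1000] from the (500,0.12)-(1000,0.11) line,
## and the code after the loop holds 0.11 from index 1000 to the end.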
Lz <- length(xx) - 1
i <- 1
while(i <= Lz) {
  j <- i + 1
  x1 <- xx[i:j]
  y1 <- yy[i:j]
  #browser()
  Lfit <- lm(y1 ~ x1)
  Int <- Lfit$coeff[1]
  m <- Lfit$coeff[2]
  x[xx[i]:xx[j]] <- Int + (m*index[xx[i]:xx[j]])
  i <- i + 1
} ### end of loop

#### then use the last zero point to fill out the rest of the series
Lz <- length(yy)
Last.zero <- yy[Lz]
Li <- length(index)
Lend <- xx[Lz]
x[Lend:Li] <- Last.zero
x
}

#####################################################################

fun.skewness <- function(x,OUT=F) {
## function designed to take a time series x and calculate the mean,
## median, std. dev., skewness and kurtosis.

# get rid of NAs for the rest:
xx <- is.na(x)
if(sum(xx,na.rm=T) < length(xx)) {
  N <- length(x) - sum(xx,na.rm=T)
  xbar <- mean(x,na.rm=T)
  xmed <- median(x,na.rm=T)
  x.min <- min(x,na.rm=T)
  x.max <- max(x,na.rm=T)
  sdx <- var(x,na.rm=T)^.5
  Skx <- mean((x - xbar)^3,na.rm=T) / (sdx^3)
  Krx <- mean((x - xbar)^4,na.rm=T) / (sdx^4)
  if(OUT==T) {
    cat("\n N = ",N)
    cat("\n xbar = ",xbar)
    cat("\n xmed = ",xmed)
    cat("\n sdx = ",sdx)
    cat("\n x.min = ",x.min)
    cat("\n x.max = ",x.max)
  }
} else {  ## all values were NA
  N <- 0
  xbar <- NA
  xmed <- NA
  sdx <- NA
  Skx <- NA
  Krx <- NA
  x.min <- NA
  x.max <- NA
}
list(N=N,xbar=xbar,xmed=xmed,sdx=sdx,Skx=Skx,Krx=Krx,x.min=x.min,x.max=x.max)
#invisible()
# end function
}

##############################################################

fun.Ltimetag <- function(t1) {
# function to convert a POSIX time object to a numeric matrix with
# columns: year, Julian day, hour of day (*100), minute, second
year <- strftime(t1, "%Y")
year <- as.integer(year)
jday <- strftime(t1,"%j")
jday <- as.integer(jday)
hday <- strftime(t1,"%H")
hday <- (as.integer(hday))*100
mday <- strftime(t1, "%M")
mday <- as.integer(mday)
Sday <- strftime(t1, "%S")
Sday <- as.integer(Sday)
## put into a matrix and return it
ttag <- cbind(year,jday,hday,mday,Sday)
ttag
}

########################################################################

fun.windaverage <- function(WD,WS) {
## inputs - vectors of wind direction and wind speed that we
## want to average.
## calculate wind speed and direction

# (1) calculate scalar wind speed
U <- mean(WS,na.rm=T)
stdU <- var(WS, na.rm=T)^0.5

### now 0-360 scale
### Jan. 2001 - this is the correct way to get average wind direction.
wd <- WD*(pi/180)  # get into radians
Ux <- (mean(sin(wd),na.rm=T))
Uy <- (mean(cos(wd),na.rm=T))
wdir <- (atan(Ux/Uy))*(180/pi)
## Now get the proper quadrant:
if(Uy < 0) wdir <- wdir + 180
if(Uy > 0) wdir <- ifelse(wdir < 0, 360+wdir, wdir)
avgWD <- wdir

## Vector wind speed:
Ue <- mean((WS*sin(wd)),na.rm=T)
Un <- mean((WS*cos(wd)),na.rm=T)
Vws <- (Ue^2 + Un^2)^0.5

# New way of getting std. dev. of wind direction
## From Campbell 23x book - Yamartino algorithm (pp 11-2)
epsil <- (1 - (Ux^2 + Uy^2))^.5
sdUdir <- asin(epsil)*(1 + (0.1547*epsil^3))
stdWD <- sdUdir*(180/pi)

list(U=U,avgWD=avgWD,Vws=Vws,stdU=stdU,stdWD=stdWD)
}

#############################################################################

fun.RMY.despike <- function(u,v,w,tc) {
# Subroutine used in early 2008 -
# we were getting spikes where all values of u,v,w, and tc were equal
# to zero; this removes those samples and replaces them with NA.
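#
# Hypothetical usage sketch (u,v,w,tc are raw sonic series):
#   clean <- fun.RMY.despike(u, v, w, tc)
#   u <- clean$u; v <- clean$v; w <- clean$w; tc <- clean$tc
#   cat("\n", clean$z3, " Tc spikes were set to NA")
#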
### first look for when tc >= 350
test1 <- tc >= 350
z3 <- sum(test1,na.rm=T)
if(z3 > 0) {
  cat(" :\n replacing ", z3,"Tc spikes")
  u <- ifelse(test1==T, NA, u)
  v <- ifelse(test1==T, NA, v)
  w <- ifelse(test1==T, NA, w)
  tc <- ifelse(test1==T, NA, tc)
}

### next look for when w >= 30
test2 <- w >= 30
z4 <- sum(test2,na.rm=T)
if(z4 > 0) {
  cat(" :\n replacing ", z4,"w spikes")
  u <- ifelse(test2==T, NA, u)
  v <- ifelse(test2==T, NA, v)
  w <- ifelse(test2==T, NA, w)
  tc <- ifelse(test2==T, NA, tc)
}

# return list of the filtered series and the Tc spike count
list(u=u, v=v, w=w, tc=tc, z3=z3)
}

#############################################################################

fun.ECflux <- function(X, i) {
#fun.lat.heat1 <- function(w, H2O.raw, Tbar, wT.cov, P, rhov, sigma, rho, H,t1, Lv, cpm)
# Subroutine which calculates the sensible and latent heat flux using
# sonic data and the Campbell Kr hygrometer.
# Corrects the H2O vapor flux for oxygen absorption. Written by
# Turnipseed mainly, with some DB additions. 10/97
#
# Inputs
# X: matrix of w, tc, and q
# (from the older signature commented out above:)
# H2O.raw: time series, mV signal from Kr hygrometer
# w: time series (presumably rotated) for w (m/s)
# Tbar: mean sonic or other temp (Kelvin)
# wT.cov: covariance of w and T (m/s * K)
# P: ambient P (kPa)
# rhov : avg. H2O vapor density, kg m-3
# sigma : rhov/rhoa
# rho : air density, kg m-3
# H : sensible heat flux, W m-2
#
# returns: w.tc, H (W m-2), E.kr (latent heat flux, W m-2),
#          wq.cov (kg m-2 s-1), HcorrE, h2o (g m-3), Fbuoy (m K s-1)

## parse X
w <- X[,1]
tc <- X[,2]
q <- X[,3]

##### need to scale q for the 5/4 mV response in the RM Young sonic
q <- q*(5/4)

######## first need some parameters/numbers
Tbar <- mean(tc,na.rm=T)   ## mean T, oC
Tbar.K <- Tbar + 273.15    ## mean T, K
P <- 100                   ## mean pressure, kPa (just an initial guess)
rho <- P/(Tbar.K * 0.287)  # R = 0.287 kPa m^3 kg-1 K-1; rho in kg m^-3
cp <- 1004.67
Lv <- (2.501 - (0.00237*Tbar))*1e6  ### J kg-1 (latent heat of vaporization of H2O)

## Sensible heat flux:
w.tc <- var(w,tc, na.rm=T)
H <- rho*cp*w.tc
cat("\n Test EC subroutine")
#if(i == 1) browser()

## now the Kr hygrometer latent heat flux.
## First - make sure that the raw H2O time series contains no negative
## numbers before taking logs - replace them with the median:
q <- ifelse(q < 0, median(q,na.rm=T), q)

# Campbell method for correcting the H2O flux for O2 absorption.
# Sets of coefficients to use (from Campbell calibrations for the
# EOL instrument - serial # 1395)
## Need : pathlength = plength
##      : abs. coeff = kw (this varies from wet to dry range)
ko <- 0.0045     # O2 abs. coefficient, m^3 g-1 cm-1
plength <- 1.204 # pathlength in cm
kw <- 0.156      # m^3 g-1 mV-1
Vo <- 4752
##############################################
h2o <- (1/(-kw*plength))*(log(q) - log(Vo))  # in g/m^3
h2o <- mean(h2o,na.rm=T)                     # in g/m^3
rhov <- h2o/1000                             # rhov in kg/m^3

## from Campbell eddy correlation manual - appendix A, page A2
## use eqn. 14 and 15 to calculate the w-H2O covariance (as coded below):
## w'q' = -(w'ln(H2O.raw)')/(kw*x) + OC2*w'T'
## OC2  = (ko*(%O2)*(MW of O2)*Ptot)/(kw*R*T^2)
log.q <- log(q)
##log.h2o <- detrend(log(H2O.raw))
w.q <- var(w,log.q, na.rm=T)
OC2 <- (ko*P*1000*0.2095*31.9988)/(kw*8.315*(Tbar.K^2))
wq.cov <- -(w.q/(kw*plength)) + (OC2*w.tc)
# water vapor flux:
wq.cov <- wq.cov/1000  # convert to kg m-2 s-1

# Now latent heat flux (eq 42a Webb et al 1980)
mu <- 1.6077
### Note - need to correct the Kr hygrometer for sensible heat flux
E.kr <- Lv*(wq.cov + (rhov*(w.tc/Tbar.K)))
### is the HcorrE a "+" to the covariance?? I think so.
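### For reference: E.kr above restates eq. 42a of Webb et al. (1980),
###   E = Lv * ( w'q' + rhov * w'T' / Tbar.K ),
### so HcorrE below isolates just the temperature (density) correction
### term,  HcorrE = Lv * rhov * w'T' / Tbar.K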
HcorrE <- Lv*(rhov*(w.tc/Tbar.K))
#if(i == 1) browser()

######### Need buoyancy flux here:
## Fbuoy <- w'T' + Tbar.K*0.61*w'q' (where q is the mixing ratio: rhov/rhoa)
Fbuoy <- w.tc + 0.61*Tbar.K*(wq.cov/rho)  ## units are m K/s

# return list
list(w.tc=w.tc, H=H, E.kr=E.kr, wq.cov=wq.cov, HcorrE=HcorrE, h2o=h2o, Fbuoy=Fbuoy)
}

########################################################################

fun.windstuff <- function(u,v,w, bearing) {
## calculate wind speed and direction

# (1) calculate scalar wind speed
###ws <- (u^2 + v^2 + w^2)^0.5
ws <- (u^2 + v^2)^0.5
U <- mean(ws,na.rm=T)
sdU <- var(ws, na.rm=T)^0.5

#### Simple average wind direction:
ubar <- mean(u,na.rm=T)
vbar <- mean(v, na.rm=T)
wd2 <- atan(vbar/ubar)*(180/pi)  ## +/- 90 degrees; relative to sonic
wd2 <- ifelse(ubar >= 0, 360 - wd2, 180 - wd2)
wd2 <- ifelse(wd2 >= 360, wd2-360, wd2)  # now in 0-360 deg. relative to sonic
### put in bearing of sonic
# correct for original sonic orientation
wd2 <- wd2 + bearing
wd2 <- ifelse(wd2 >= 360, wd2-360, wd2)

####################################
#### method #3 for wind direction:
theta <- atan(ubar/vbar)*(180/pi)
### get the correct hemisphere:
wd3 <- ifelse(vbar > 0, (270 + theta), (90 + theta))
### put in sonic bearing
wd3 <- wd3 + bearing
wd3 <- ifelse(wd3 > 360, wd3 - 360, wd3)

# (2) original wind direction calculation
d <- atan(v/u)*180/pi
d <- ifelse(u >= 0, 360 - d, 180 - d)
d <- ifelse(d >= 360, d-360, d)  ### now 0-360 scale with respect to sonic
# correct for original sonic orientation
d <- d + bearing
d <- ifelse(d >= 360, d-360, d)

######################################################################
### Jan. 2001 - this is the correct way to get average wind direction.
wd <- d*(pi/180)  # get into radians
Ux <- (mean(sin(wd),na.rm=T))
Uy <- (mean(cos(wd),na.rm=T))
wdir <- (atan(Ux/Uy))*(180/pi)
## Now get the proper quadrant:
if(Uy < 0) wdir <- wdir + 180
if(Uy > 0) wdir <- ifelse(wdir < 0, 360+wdir, wdir)
Udir <- wdir

## Vector wind speed:
Ue <- mean((ws*sin(wd)),na.rm=T)
Un <- mean((ws*cos(wd)),na.rm=T)
Vws <- (Ue^2 + Un^2)^0.5

# New way of getting std. dev. of wind direction
## From Campbell 23x book - Yamartino algorithm (pp 11-2)
epsil <- (1 - (Ux^2 + Uy^2))^.5
sdUdir <- asin(epsil)*(1 + (0.1547*epsil^3))
sdUdir <- sdUdir*(180/pi)

list(U=U,Udir=Udir,Vws=Vws,sdU=sdU,sdUdir=sdUdir)
}

###############################################################

fun.turbulence <- function(X) {
# Subroutine which calculates some turbulence stuff from sonic anemometer data
# Inputs
# X: matrix of u, v, w, and tc (assumed already rotated!!)
u <- X[,1]
v <- X[,2]
w <- X[,3]
tc <- X[,4] + 273.15
sdw <- var(w,na.rm=T)^.5
sdtc <- var(tc,na.rm=T)^.5
tcbar <- mean(tc, na.rm=T)
u.w <- var(u,w, na.rm=T)
v.w <- var(v,w, na.rm=T)
w.tc <- var(w,tc,na.rm=T)
ustar <- ((u.w)^2 + (v.w)^2)^0.25
ObLength <- -1*(tcbar)*(ustar^3) / (0.4 * 9.81 * (w.tc))
z.m <- 3.0  ## approx. measurement height = 3 m
z.L <- z.m/ObLength

########## Eddy Diffusivity ###############
### from a single-height measurement.
##########################################################
vKc <- 0.4  # von Karman's constant
########## calculate stability functions at z = 3 m
phi <- 1
if(z.L > 0.15) phi <- 1 + (5*z.L)
if(z.L < -0.15) phi <- (1 - (16*z.L))^.25
Kneut <- vKc*z.m*ustar
Kedd <- Kneut/phi

list(sdw=sdw,sdtc=sdtc,tcbar=tcbar,ustar=ustar,z.L=z.L, Kneut=Kneut, Kedd=Kedd)
}

#####################################################################
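#####################################################################
## Hypothetical end-to-end usage sketch (nothing below is called by
## the code above; the file name, despike threshold, and bearing are
## made-up illustrations). Columns follow the Data matrix written by
## fun.Montes_FileTest: dday,msec,u,v,w,tc,q,PARdir,PARdiff,Rnet,Err.
#   d  <- matrix(scan("i:/MONTES010/hourfile.son"), byrow=T, ncol=11)
#   u  <- d[,3]; v <- d[,4]; w <- d[,5]; tc <- d[,6]; q <- d[,7]
#   s  <- fun.RMY.despike(u, v, w, tc)           # drop railed samples
#   u  <- fun.despike(s$u, "u", 6)$x             # 6-sigma despike
#   v  <- fun.despike(s$v, "v", 6)$x
#   w  <- fun.despike(s$w, "w", 6)$x
#   tc <- fun.despike(s$tc, "tc", 6)$x
#   wind <- fun.windstuff(u, v, w, bearing=180)  # speed/direction
#   turb <- fun.turbulence(cbind(u, v, w, tc))   # ustar, z/L, Kedd
#   flux <- fun.ECflux(cbind(w, tc, q), i=1)     # H, E.kr, Fbuoy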