# Load the workspace file 'workshop.core'

# Load the following libraries into your workspace:
library(MASS)
library(spatstat)
library(splancs)
library(MCMCpack)

# The following functions are required, but are already in the workspace:
# core.nu.mcmc
# draw.contour
# plot.CA.shade
# jet.colors

###############
# Single Cores:
###############

# The first step in implementing our core area model is an initial test for
# departure from complete spatial randomness (CSR) in the point pattern of
# interest. We test for departures from CSR using the L-function, drawing 1000
# Monte Carlo simulations on the home range being tested. We do not use a
# boundary correction because, presumably, the home range boundary is the
# demarcation between areas used and unused by an animal.

# There are 2 data sets in the workspace: bob065 and rs051.

# Let's first test the bob065 data set to determine if the home range point
# pattern departs significantly from CSR.
test.csr(bob065[,4:5])

# You will notice that each animal's point pattern shows evidence of a
# significant departure from a CSR point process over a range of spatial
# scales. This is evident from the estimated L-values falling outside the
# envelopes. Specifically, the graphs show that the observed data from each
# animal are more clustered than expected under a CSR process, because the
# estimated L-values are above the upper CSR envelope. Thus, each animal shows
# evidence that it has a core area.

# The Bayesian core area model is computationally intensive. Each model takes
# ~5-10 minutes to run, though this varies with the input parameters. The
# workspace contains the model outputs for each of the animals. But, to give
# you an understanding of how to implement the code, we will run the model on
# one animal with far fewer simulations (i.e., 100) than would typically be
# run (anywhere from 5000 to >100000, depending on the data set).
test=core.nu.mcmc(as.matrix(bob065[,4:5]),bw=NULL,nu=2,n.mcmc=100,ng.max=75,iso=c(.5,.5),iso.tune=0.01)

# This code runs the model that optimally partitions the home range into 2 CSR
# point processes. The data set used is bob065, where the 4th and 5th columns
# contain the x and y data. The bandwidth parameter, bw, is left at the default
# (i.e., the reference bandwidth selection procedure) but can be modified if
# desired; nu is the number of subsets to partition the home range into;
# n.mcmc is the number of MCMC simulations to run; ng.max is the maximum
# number of bins in either the x or y direction (used to discretize the home
# range); iso gives the starting values and must sum to 1; and iso.tune is the
# tuning parameter for the MCMC algorithm.

# Try running the above code again with different starting values.
# Try the following: ng.max=50, iso=c(.2,.8)

# When running these models, it's best to take an iterative approach to
# determining how many regions to partition the home range into. Begin by
# partitioning the home range into 2 regions, and if there is still evidence
# of clustering in one of the regions, try partitioning into 3 regions.

# Let's look at the output from some of the models that were already run.
# We'll start with the model results from the bob065 data set. We used the
# following input for this model. DO NOT RUN THIS CODE.
# bob065.2.out=core.nu.mcmc(as.matrix(bob065[,4:5]),n.mcmc=5000,nu=2,ng.max=50,iso=c(.5,.5),iso.tune=.01)

# The output for this model is saved in the workspace as bob065.2.out

# Let's view the model output.
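# Before plotting, it can help to first inspect the structure of the saved
# output object (a quick look using base R; iso.save is the component used
# throughout the rest of this workshop):
str(bob065.2.out)
dim(bob065.2.out$iso.save)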
matplot(bob065.2.out$iso.save[1,],type="l",lty=1)

# The model looks like it converged well around the 55% isopleth, with little
# evidence of a need for a burn-in period. We can sub-sample (thin) the output
# to remove any serial autocorrelation that might exist with the following
# modification:
matplot(bob065.2.out$iso.save[1,seq(1,5000,5)],type="l",lty=1)

# We can visualize the posterior distribution of the isopleth parameter:
bob065.density=density(bob065.2.out$iso.save[1,seq(1,5000,5)])
plot(bob065.density)

# We can also obtain the mode of the posterior distribution as our estimate of
# the isopleth parameter:
plot(bob065.density)
bob065.density$x[which.max(bob065.density$y)]
abline(v=bob065.density$x[which.max(bob065.density$y)],col=2)

# A key feature of our model is the ability to account for the uncertainty of
# our core area estimate. We can obtain the 95% Credible Interval by:
quantile(bob065.2.out$iso.save[1,seq(1,5000,5)],c(.025,.975))
abline(v=quantile(bob065.2.out$iso.save[1,seq(1,5000,5)],c(.025,.975)),col=4)

# Now that we have obtained a model that appears sufficient for partitioning
# the home range into 2 regions (i.e., core area and non-core), we need to
# test that the partition has indeed created two regions that are both CSR.
# Again we'll use the test.csr function, but we need to change a couple of
# parameters. Now that we're testing for CSR in >1 region, we need to include
# the spatial scale we want to test over for each region (i.e., r) and the
# isopleths that define the boundaries of the regions; in this case, 95% and
# 56%. Values should be ordered from the outermost boundary to the innermost:
test.csr(bob065[,4:5],r=c(500,300),alpha=c(.95,.56))

# The results indicate that the 2 CSR model was sufficient to account for all
# the clustering initially detected in the data set.

# Now we can visualize the results. The plotting code can also be adjusted if
# something other than the reference bandwidth is used, by replacing NULL
# below with the desired bandwidth:
plot.CA.shade(bob065[,4:5],NULL,bob065.2.out$iso.save[1,seq(1,5000,5)],100,,165065)

##################
# Multiscale Cores
##################

# Above we used the core area model to partition the home range point pattern
# into 2 regions with separate CSR point processes. But there are instances
# where clustering occurs at multiple scales, something that is unaddressed in
# other methods for describing core areas. Below we will show how this model
# can be used to partition the home range into >2 CSR regions.

# Plot the data set rs051 and test for any clustering in the home range
test.csr(rs051)

# Visually, there appear to be 3 CSR processes, but we'll first look at the
# results of partitioning the home range into 2 CSR regions.

# Look at the output of the single core model for rs051
# (run with nu=2, n.mcmc=5000, iso=c(.5,.5), iso.tune=0.01, ng.max=75)
matplot(rs051.2.out$iso.save[1,],type="l",lty=1)

# The model appears to converge on 53% as the optimal isopleth after a burn-in
# of ~100.
rs051.2.density=density(rs051.2.out$iso.save[1,seq(100,5000,4)])
plot(rs051.2.density)

# Get the mode of the posterior distribution:
plot(rs051.2.density)
rs051.2.density$x[which.max(rs051.2.density$y)]
abline(v=rs051.2.density$x[which.max(rs051.2.density$y)],col=2)

# Now let's test whether the 2 CSR model is sufficient to partition the home
# range into homogeneous regions:
test.csr(rs051[,1:2],r=c(300,100),alpha=c(.95,.53))

# The test should show that there is still significant clustering.
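# As an optional cross-check of this remaining clustering, a similar
# L-function test could be run with spatstat's built-in envelope() function
# (a sketch only; unlike test.csr, this uses a window estimated by ripras()
# rather than the home range boundary):
rs051.ppp=as.ppp(as.matrix(rs051[,1:2]),W=ripras(as.matrix(rs051[,1:2])))
plot(envelope(rs051.ppp,Lest,nsim=99,correction="none"))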
# These results indicate that the 2 CSR model is insufficient and that >2 CSR
# regions exist. We need to account for all of these areas if we want to
# accurately characterize the core area.

# The code for running and analyzing data with >2 CSR regions is slightly
# different than for data with 2 CSR regions.

# Let's view the code used to delineate 3 CSR regions in the rs051 data set.
# DO NOT RUN THIS CODE.
# rs051.3.out=core.nu.mcmc(as.matrix(rs051[,1:2]),n.mcmc=10000,nu=3,ng.max=75,iso=c(.3,.4,.3),iso.tune=.01)

# Note the iso parameter in the code. Before, we only had two values: one
# corresponding to our starting isopleth value, and the other the amount
# needed so the two summed to 1. This time, we need to start with three
# isopleth values: one for the 'inner core area', one for the 'outer core
# area', and a third value so that all three sum to 1. The order of the
# values is important, from inner to outer.

# View the output from the model:
matplot(t(apply(rs051.3.out$iso.save[1:2,],2,cumsum)),type="l",lty=1)
rs051.3.density=density(t(apply(rs051.3.out$iso.save[1:2,seq(1000,10000,9)],2,cumsum)))
plot(rs051.3.density)

# First obtain the isopleth estimate for the inner core (which.max is applied
# over subsets of the density grid to isolate each of the two modes)
max1=which.max(rs051.3.density$y[1:200])
rs051.3.density$x[max1]
abline(v=rs051.3.density$x[max1],col=2)

# Then the isopleth estimate for the outer core
max2=which.max(rs051.3.density$y[200:500])+199
rs051.3.density$x[max2]
abline(v=rs051.3.density$x[max2],col=2)

# Now obtain the 95% Equal Tail Credible Intervals for the 2 isopleths
apply(apply(rs051.3.out$iso.save[1:2,seq(1000,10000,9)],2,cumsum),1,quantile,c(.025,.975))
abline(v=apply(apply(rs051.3.out$iso.save[1:2,seq(1000,10000,9)],2,cumsum),1,quantile,c(.025,.975)),col=4)

# Again, let's test that our model has adequately partitioned the home range
# into 3 CSR processes:
test.csr(rs051[,1:2],r=c(200,100,100),alpha=c(.95,.7,.32))

# Finally, we can visualize the results of our core area model:
plot.CA.shade(rs051[,1:2],NULL,c(apply(rs051.3.out$iso.save[1:2,seq(1000,10000,9)],2,cumsum)),100,,151051)

#################
# Core area size:
#################

# The size of the core area is directly related to the isopleth value for the
# data set, so we can also characterize the posterior distribution of core
# area size. For the first example, using the bob065 data set:
bob065.casize.density=density(bob065.2.out$ca.size.save[seq(1,5000,5),1])
bob065.casize.density$x[which.max(bob065.casize.density$y)]
quantile(bob065.2.out$ca.size.save[seq(1,5000,5),1],c(.025,.975))

# Or for the rs051 data set:
rs051.casize.density=density(t(apply(rs051.3.out$ca.size.save[seq(1000,10000,9),1:2],1,cumsum)))

# First obtain the size estimate for the inner core
size.max1=which.max(rs051.casize.density$y[1:200])
rs051.casize.density$x[size.max1]

# Then the size estimate for the outer core
size.max2=which.max(rs051.casize.density$y[200:500])+199
rs051.casize.density$x[size.max2]
apply(apply(rs051.3.out$ca.size.save[seq(1000,10000,9),1:2],1,cumsum),1,quantile,c(.025,.975))

########################
#Changing the bandwidth#
########################

# The default bandwidth value is obtained from the reference bandwidth:
c(bandwidth.nrd(rs051[,1]),bandwidth.nrd(rs051[,2]))

# You can easily modify the bandwidth by scaling the reference bandwidth to
# match your desired bandwidth. For example, we can use 90% of the reference
# bandwidth as our chosen bandwidth:
bw.9=c(bandwidth.nrd(rs051[,1])*.9,bandwidth.nrd(rs051[,2])*.9)

# This can then be substituted into the core.nu.mcmc function (i.e., bw=bw.9).
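# For example, the 2-region rs051 model could be re-run with this bandwidth
# (a sketch only; the output name rs051.bw9.out is hypothetical, and run time
# will be similar to the earlier models). DO NOT RUN THIS CODE.
# rs051.bw9.out=core.nu.mcmc(as.matrix(rs051[,1:2]),bw=bw.9,nu=2,n.mcmc=5000,ng.max=75,iso=c(.5,.5),iso.tune=0.01)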