diff --git a/src/FOF.jl b/src/FOF.jl index 1a8f015..bf5b11c 100644 --- a/src/FOF.jl +++ b/src/FOF.jl @@ -7,10 +7,10 @@ using DataStructures export groups """ -Friends of Friends Algorithm: for sets of points x, using linking length l and returning groups with more than minlength members in an array of arrays of indeces inot the point data provided. +Friends of Friends Algorithm: for sets of points x, using linking length l and returning groups with more than minlength members in an array of arrays of indeces inot the point data provided. using Gadfly - x = rand((2,100)) - gps = FOF.groups(x,.1, minlength=3) + x = rand((2,100)) + gps = FOF.groups(x,.1, minlength=3) n = 1 plot(layer(x=x[1,gps[n]], y=x[2,gps[n]], Geom.point,Theme(default_color=colorant"red")), layer(x=x[1,:], y=x[2,:], Geom.point, Theme(default_color=colorant"blue")) ) @@ -22,7 +22,7 @@ function groups(x, l; minlength=1) ds = IntDisjointSets(Npart) for i in 1:Npart idxs = inrange(tree, x[:,i], l, false) # within search radius - for j in eachindex(idxs) # + for j in eachindex(idxs) # union!(ds,i,idxs[j]) end if (num_groups(ds) == 1) # just in case people use too large a linking length don't waste time @@ -31,7 +31,7 @@ function groups(x, l; minlength=1) end # in case everything has been joined already exit end println("FOF: finished grouping") - + idxs = find(ds.ranks) # all non-zero ranks are parent particles in groups groupid = [ds.parents[idxs[i]] => i for i in eachindex(idxs)] grouplen = Dict{Int,Int}() diff --git a/src/Hydro.jl b/src/Hydro.jl index 7f41f92..0e9f40b 100644 --- a/src/Hydro.jl +++ b/src/Hydro.jl @@ -38,13 +38,13 @@ function SolveHydroEquations!(gp, gd, p, dt) FF = zeros(Neq, Ngl[1]+1,Ngl[2],Ngl[3]) GG = zeros(Neq, Ngl[1],Ngl[2]+1,Ngl[3]) HH = zeros(Neq, Ngl[1],Ngl[2],Ngl[3]+1) - + HLL_fluxes(FF, GG, HH, U, gd) - + update_dUF(dUF,FF) # don't do the 1./dx here.. : equ(3) update_dUG(dUG,GG) update_dUH(dUH,HH) - + dx = [(g.RE[i]-g.LE[i])/g.dim[i] for i in 1:3] update_U(U, dUF,dUG,dUH, dx, dt) @@ -78,7 +78,7 @@ end function update_dUG(dUG,GG) for j=1:size(dUG,3), n=1:size(dUG,1) - dUG[n,:,j,:] = -(GG[n,:,j+1,:] - GG[n,:,j,:]) + dUG[n,:,j,:] = -(GG[n,:,j+1,:] - GG[n,:,j,:]) end nothing end @@ -114,7 +114,7 @@ function HLL_fluxes(FF, GG, HH, U, gd) # calculate P and cs - v2 = zeros(1,Ngl...) ## note the weird indexing here.... for easier algebra with U + v2 = zeros(1,Ngl...) ## note the weird indexing here.... 
for easier algebra with U v2[1,:,:,:] = d.d["V₁"].^2 + d.d["V₂"].^2 + d.d["V₃"].^2 if DualEnergyFormalism ε = d.d["ε"] @@ -131,7 +131,7 @@ function HLL_fluxes(FF, GG, HH, U, gd) # create left and right fluxes LR_fluxes(FL,FR, GL,GR, HL,HR, U, gd, P) - + # create HLL fluxes, FF, GG, HH for n=1:size(FF,1), j=1:size(FF,2) @@ -184,7 +184,7 @@ function HLL_fluxes(FF, GG, HH, U, gd) end function LR_states(UL_F,UR_F,UL_G,UR_G,UL_H,UR_H, U) - # piecewise constant + # piecewise constant Ngl = size(U)[2:end] Neq = size(U)[1] for n=1:Neq @@ -209,7 +209,7 @@ function LR_states(UL_F,UR_F,UL_G,UR_G,UL_H,UR_H, U) end function LR_fluxes(FL,FR, GL,GR, HL,HR, U, gd, P) - # piecewise constant + # piecewise constant d = gd[1] Ngl = size(U)[2:end] Neq = size(U)[1] @@ -220,7 +220,7 @@ function LR_fluxes(FL,FR, GL,GR, HL,HR, U, gd, P) V₂[1,:,:,:] = d.d["V₂"] V₃[1,:,:,:] = d.d["V₃"] - + for i=1:size(FL,2) FL[1,i,:,:] = U[3,mod1(i-1,end),:,:] FR[1,i,:,:] = U[3,mod1(i,end),:,:] @@ -261,7 +261,7 @@ function LR_fluxes(FL,FR, GL,GR, HL,HR, U, gd, P) end function ComputeHydroTimeStep(gp, gd, p) - # calculate P and cs + # calculate P and cs d = gd[1] g = gp[1] Ngl = size(d.d["ρ"]) @@ -285,20 +285,20 @@ function ComputeHydroTimeStep(gp, gd, p) end function InitializeHydroSimulation(gp, gd, p) - - global const rank = sum([(i > 1) for i in TopgridDimensions]) # infer dimensionality + + global const rank = sum([(i > 1) for i in TopgridDimensions]) # infer dimensionality global const eos = EOS(γ) if verbose println("Dimensions according to TopgridDimensions: ", rank) end - + # Create TopGrid dim = ntuple((i) -> TopgridDimensions[i], length(TopgridDimensions)) - g = gp[1] = GridPatch(DomainLeftEdge,DomainRightEdge,1,1, dim) + g = gp[1] = GridPatch(DomainLeftEdge,DomainRightEdge,1,1, dim) gd[1] = GridData(Dict()) d = gd[1] - + # allocate our arrays for the hydro quantities d.d["ρ"] = ones(g.dim) d.d["E"] = ones(g.dim) @@ -309,8 +309,8 @@ function InitializeHydroSimulation(gp, gd, p) d.d["V₂"] = zeros(g.dim) d.d["V₃"] = zeros(g.dim) - ρ = d.d["ρ"] - E = d.d["E"] + ρ = d.d["ρ"] + E = d.d["E"] V₁ = d.d["V₁"] V₂ = d.d["V₂"] V₃ = d.d["V₃"] @@ -322,7 +322,7 @@ function InitializeHydroSimulation(gp, gd, p) # call Initializer given as ProblemDefinition in the .conf file callsig = string("initialvalues=",conf["ProblemDefinition"]) eval(parse(callsig)) # call the correct routine - + for k in 1:size(ρ,3) for j in 1:size(ρ,2) for i in 1:size(ρ,1) @@ -337,7 +337,7 @@ function InitializeHydroSimulation(gp, gd, p) end end end - + nothing end @@ -372,10 +372,10 @@ function SodShocktube(x) V₃ = 0 if x[1] < 0.5 ρ = ρl - E = ε_from_P(Pl, ρ, eos) + 0.5*(V₁^2+V₂^2+V₃^2) + E = ε_from_P(Pl, ρ, eos) + 0.5*(V₁^2+V₂^2+V₃^2) else ρ = ρr - E = ε_from_P(Pr, ρ, eos) + 0.5*(V₁^2+V₂^2+V₃^2) + E = ε_from_P(Pr, ρ, eos) + 0.5*(V₁^2+V₂^2+V₃^2) end ρ, E, V₁, V₂, V₃ -end \ No newline at end of file +end diff --git a/src/NoName.jl b/src/NoName.jl index c855875..df9da05 100644 --- a/src/NoName.jl +++ b/src/NoName.jl @@ -12,22 +12,23 @@ include("restart.jl") include("Hydro.jl") # not a module to access same global space include("cosmology.jl") include("ParticleMesh.jl") +include("SPH.jl") println("You want to run this most likely from the shell as:") println(""" "julia --color=yes -i -e "using NoName; gp, gd, p = run_noname();" particle_mesh.conf" """) function noname() - + @debug("in noname()") gp = Dict() # we keep a hash table to keep track of grids gd = Dict() # empty Dict to store Grid Data p = Dict() # empty Dict to store particle data - - # conf["Initializer"] 
+ + # conf["Initializer"] initializeRoutine(gp,gd,p) @info("Finished Initializing.") - # conf["Evolve"] + # conf["Evolve"] evolveRoutine(gp,gd,p) @info("Finished Evolve.") @@ -55,7 +56,7 @@ function run_noname() @info(" $arg => $val") end - parseconf(string(NONAME_SRC_DIR,"/default.conf")) # load default parameters + parseconf(string(NONAME_SRC_DIR,"/default.conf")) # load default parameters @info("Read ", string(NONAME_SRC_DIR,"/default.conf")) global conf = copy(AppConf.conf) @@ -78,17 +79,17 @@ function run_noname() if parsed_args["restart"] conf["Initializer"] = "restart" end - + eval(parse(string("initializeRoutine=",conf["Initializer"]))) eval(parse(string("evolveRoutine=",conf["Evolve"]))) - + done = false if haskey(conf,"ProfilingOn") if conf["ProfilingOn"] Profile.init(delay=0.01) # Initialize Profiler @profile gp, gd, p = noname() write_profiling_results(ProfilingResultsFile) # currently broken ... - @debug("Wrote Profiling Results file:", ProfilingResultsFile) + @debug("Wrote Profiling Results file:", ProfilingResultsFile) done = true end end diff --git a/src/ParticleMesh.jl b/src/ParticleMesh.jl index 46f678d..0a50675 100644 --- a/src/ParticleMesh.jl +++ b/src/ParticleMesh.jl @@ -16,7 +16,7 @@ function analyzePM(gp, gd, p) if isdefined(:cosmo) @info("CurrentRedshift:", c["CurrentRedshift"]) end - + on = c["CurrentOutputNumber"] # Since we are changing it cannot use shortcut s = @sprintf("%5.5i", on) newD = OutputDirectory @@ -37,8 +37,8 @@ function analyzePM(gp, gd, p) fname = string(newD,"/",s,".h5") IOtools.grid_output(fname, gp, gd,overwrite=true) IOtools.particle_output(fname, p) - - + + # saving power spectrum fname = string(newD,"/",s,"_PS.png") karray, power = powerSpectrum(gp, gd, p) @@ -47,25 +47,25 @@ function analyzePM(gp, gd, p) ylabel("P(k)") title("Power spectrum") savefig(fname) - close() + close() end function InitializeParticleSimulation(gp, gd, p) @debug("InitializeDarkMatterSimulation: start.") - global const rank = sum([(i > 1) for i in TopgridDimensions]) # infer dimensionality + global const rank = sum([(i > 1) for i in TopgridDimensions]) # infer dimensionality @info("Dimensions according to TopgridDimensions: ", rank) - + Npart = prod(ParticleDimensions) # Create TopGrid dim = (TopgridDimensions...) - gp[1] = GridPatch(DomainLeftEdge,DomainRightEdge,1,1, dim) + gp[1] = GridPatch(DomainLeftEdge,DomainRightEdge,1,1, dim) gd[1] = GridData(Dict()) d = gd[1] - + # allocate our arrays for the grid quantities - d.d["ρD"] = ones(gp[1].dim) # mass density + d.d["ρD"] = ones(gp[1].dim) # mass density d.d["Φ"] = ones(gp[1].dim) # gravitational potential # allocate particles @@ -91,17 +91,17 @@ function InitializeParticleSimulation(gp, gd, p) conf["OmegaCDM"] = cosmo.Ω_m - conf["OmegaBaryon"] end p["m"] .*= conf["OmegaCDM"] # particles - + else @warn("Specified Cosmology but gave no InitialRedshift! 
") @warn("Proceed with caution ...") end end - + # call Initializer given as ProblemDefinition in the .conf file eval(parse(string("initialvalues=",conf["ProblemDefinition"]))) - - initialvalues(gp, gd, p) + + initialvalues(gp, gd, p) nothing end @@ -109,7 +109,7 @@ end function UniformParticles(gp, gd, p) initialize_particles_uniform(p["x"]) - + nothing end @@ -131,6 +131,7 @@ function PowerSpectrumParticles(gp, gd, p) x = p["x"] initialize_particles_uniform(x) + initialize_velocities_sinusoidal(x, p["v"]) rank = size(x,1) dims = conf["ParticleDimensions"] @@ -148,7 +149,7 @@ function PowerSpectrumParticles(gp, gd, p) ak = Float64(PSv * gauss[1]/k2) bk = Float64(PSv * gauss[2]/k2) # println(k2,":", PSv) - if k2 > 0 + if k2 > 0 c[i,j,k] = (ak - im * bk)/2 * kf[dd] end end @@ -164,7 +165,7 @@ function PowerSpectrumParticles(gp, gd, p) end end # Setup velocities : Still need to implement ... - + nothing end @@ -172,7 +173,7 @@ end function initialize_particles_uniform(x) rank = size(x,1) c=1 - if rank==1 + if rank==1 for i in 1:ParticleDimensions[1] x[i] = i end @@ -193,7 +194,7 @@ function initialize_particles_uniform(x) x[1,c] = i x[2,c] = j x[3,c] = k - c += 1 + c += 1 end end end @@ -206,7 +207,7 @@ function deposit(rho,x,m; interpolation="none") for i in eachindex(rho) rho[i] = 0. end - + if interpolation == "none" ngpdensity(rho,x,m) elseif interpolation == "cic" @@ -215,7 +216,7 @@ function deposit(rho,x,m; interpolation="none") sicdensity(rho,x,m) end - + end function sic_interp_1d(pa, a, x) @@ -229,7 +230,7 @@ function sic_interp_1d(pa, a, x) cx = x[n] inext = i + delt xnext = inext/Ngl - cdx = + cdx = dxe = abs(x[n+1]-x[n]) ip1 = (i == Ngl) ? 1 : i+1 ip1 = (i == 1) ? 1 : i+1 @@ -243,7 +244,7 @@ function ngpdensity(rho,x,m) Nglx = convert(Float64,TopgridDimensions[1]) Ngly = convert(Float64,TopgridDimensions[2]) Nglz = convert(Float64,TopgridDimensions[3]) - + if rank == 1 for i in 1:size(x,2) ii = floor(Int64,x[1,i]*Nglx)+1 @@ -352,7 +353,7 @@ function potential_1d(phi,rho,rt) rt[1] = 0. # make sure singularity is zero phi[:] = irfft(rt,size(rho,1)) .* 1/Nglx^2 nothing - + end @@ -381,7 +382,7 @@ function potential_2d(phi,rho,rt) rt[1,1] = 0. # make sure singularity is zero phi[:] = irfft(rt,size(rho,1)) .* 1/Nglx^2 nothing - + end @@ -390,7 +391,7 @@ function potential_3d(phi,rho,rt) Nglx = TopgridDimensions[1] Ngly = TopgridDimensions[2] Nglz = TopgridDimensions[3] - + Ngl2y = round(Int64, Ngly/2) Ngl2z = round(Int64, Nglz/2) @@ -407,7 +408,7 @@ function potential_3d(phi,rho,rt) end for i in 1:size(rt,1) kx = 2pi*(i-1)/Nglx - Ginv = -4.*(sin(kx/2)^2+sin(ky/2)^2+sin(kz/2)^2) # unnormalized Ginv = -(kx^2+ky^2+kz^2) + Ginv = -4.*(sin(kx/2)^2+sin(ky/2)^2+sin(kz/2)^2) # unnormalized Ginv = -(kx^2+ky^2+kz^2) if !(Ginv == 0.0) rt[i,j,k] /= Ginv end @@ -417,7 +418,7 @@ function potential_3d(phi,rho,rt) rt[1,1,1] = 0. # make sure singularity is zero phi[:] = irfft(rt,size(rho,1)) .* 1/Nglx^2 # not tested for non cubic grids nothing - + end function deriv1(xd, x) @@ -502,7 +503,7 @@ function deriv2_1d(x) im1 = (i == 1) ? 
Ngl : i-1 xd[i] = x[ip1] + x[im1] - 2*x[i] end - xd*Ngl^2 # + xd*Ngl^2 # end function deriv2_2d(x) @@ -516,7 +517,7 @@ function deriv2_2d(x) xd[i,j] = x[ip1,j] + x[im1,j] + x[i,jp1] + x[i,jm1] - 4*x[i,j] end end - xd*Ngl^2 # + xd*Ngl^2 # end function deriv2_3d(x) @@ -568,9 +569,9 @@ function interpVecToPoints(pa, a, x; interpolation="none") cic_interp_3d(pa, a, x) end end - nothing + nothing end - + function cic_interp_1d(pa, a, x) for n in eachindex(x) i = floor(Int64,x[n]*Ngl)+1 @@ -604,7 +605,7 @@ function cic_interp_3d(pa, a, x) Ng1 = size(a,2) Ng2 = size(a,3) Ng3 = size(a,4) - + for n in 1:size(x,2) i = floor(Int64,x[1,n]*Ng1)+1 ip1 = (i == Ng1) ? 1 : i+1 @@ -616,13 +617,13 @@ function cic_interp_3d(pa, a, x) kp1 = (k == Ng3) ? 1 : k+1 dz = x[3,n]*Ng3-k+1 for m in 1:3 - @inbounds pa[m,n] = (a[m,i,j,k]*(1.-dx)*(1.-dy)*(1.-dz) + + @inbounds pa[m,n] = (a[m,i,j,k]*(1.-dx)*(1.-dy)*(1.-dz) + a[m,ip1,j,k]*dx*(1.-dy)*(1.-dz) + - a[m,i,jp1,k]*(1.-dx)*dy*(1.-dz) + - a[m,ip1,jp1,k]*dx*dy*(1.-dz) + - a[m,i,j,kp1]*(1.-dx)*(1.-dy)*dz + - a[m,ip1,j,kp1]*dx*(1.-dy)*dz + - a[m,i,jp1,kp1]*(1.-dx)*dy*dz + + a[m,i,jp1,k]*(1.-dx)*dy*(1.-dz) + + a[m,ip1,jp1,k]*dx*dy*(1.-dz) + + a[m,i,j,kp1]*(1.-dx)*(1.-dy)*dz + + a[m,ip1,j,kp1]*dx*(1.-dy)*dz + + a[m,i,jp1,kp1]*(1.-dx)*dy*dz + a[m,ip1,jp1,kp1]*dx*dy*dz) end end @@ -675,17 +676,17 @@ function evolvePM(gp, gd, p) rho = gd[1].d["ρD"] φ = gd[1].d["Φ"] # potential - acc = zeros((3,g.dim...)) - + acc = zeros((3,g.dim...)) + pa = zeros(x) # particle accelerations rt = rfft(φ) # initialize buffer for real-to complex transform dx = 1. /maximum(g.dim) # smallest dx dt = InitialDt - + c["CurrentTime"] = c["StartTime"] c["CurrentCycle"] = c["StartCycle"] - + while c["CurrentTime"] < StopTime && c["CurrentCycle"] < StopCycle make_periodic(x) deposit(rho,x,m,interpolation=ParticleDepositInterpolation) @@ -696,7 +697,7 @@ function evolvePM(gp, gd, p) interpVecToPoints(pa, acc, x, interpolation=ParticleBackInterpolation) # LeapFrog step. Some codes combine the two drift steps, - # but then need to worry about x and v being known at different times. + # but then need to worry about x and v being known at different times. drift(x,v,dt/2) kick(v,pa,dt) drift(x,v,dt/2) @@ -705,7 +706,7 @@ function evolvePM(gp, gd, p) dt = CourantFactor*dx/maximum(abs(v)+1e-30) dt = minimum([dt, MaximumDt]) - + if ((c["CurrentTime"]+dt) > c["StopTime"] ) dt = (1. + 1e-15)*(c["StopTime"] - c["CurrentTime"]) println("final dt = ", dt) @@ -732,14 +733,14 @@ function powerSpectrum(gp, gd, p) delta = rho/rho_mean - 1 # density fluctuation deltak2 = abs(fft(delta)).^2 * prod(box) / prod(dims)^2 - + dk = 2pi / maximum(dims) # bin width #println("dk", dk) lenk = round(Int, maximum(dims)/2) # k array length karray = collect(1:lenk) * dk # k array power = zeros(lenk) # power spectrum counts = zeros(lenk) # array to keep counts - # we first add up power for each bin, then divide by counts + # we first add up power for each bin, then divide by counts for k in 1:dims[3], j in 1:dims[2], i in 1:dims[1] kf = fftfreq([i,j,k], dims) ka = sqrt(dot(kf,kf)) @@ -750,7 +751,7 @@ function powerSpectrum(gp, gd, p) counts[n] += 1 end end # END looping over all frequencies - + power = power./counts karray, power @@ -770,21 +771,21 @@ function evolvePMCosmology(gp, gd, p) m = p["m"] rho = gd[1].d["ρD"] φ = gd[1].d["Φ"] # potential - acc = zeros((3,g.dim...)) + acc = zeros((3,g.dim...)) a = 1.0 ȧ = 0.0 - + pa = zeros(x) # particle accelerations rt = rfft(φ) # initialize buffer for real-to complex transform dx = 1. 
/maximum(g.dim) # smallest dx dt = InitialDt zinit = conf["InitialRedshift"] - + c["CurrentTime"] = c["StartTime"] c["CurrentCycle"] = c["StartCycle"] - + while c["CurrentTime"] < c["StopTime"] && c["CurrentCycle"] < c["StopCycle"] make_periodic(x) deposit(rho,x,m,interpolation=ParticleDepositInterpolation) @@ -795,9 +796,9 @@ function evolvePMCosmology(gp, gd, p) a = a_from_t_internal(cosmo, c["CurrentTime"]+dt/2, zinit, zwherea1=zinit) ȧ = dadt(cosmo, a, zwherea1=zinit) - + # LeapFrog step. Some codes combine the two drift steps, - # but then need to worry about x and v being known at different times. + # but then need to worry about x and v being known at different times. a = a_from_t_internal(cosmo, c["CurrentTime"] + dt/2/2, zinit, zwherea1=zinit) drift(x,v,dt/2/a) a = a_from_t_internal(cosmo, c["CurrentTime"] + dt, zinit, zwherea1=zinit) @@ -806,14 +807,14 @@ function evolvePMCosmology(gp, gd, p) a = a_from_t_internal(cosmo, c["CurrentTime"] + 3.0*dt/2/2, zinit, zwherea1=zinit) drift(x,v,dt/2/a) - + c["CurrentTime"] += dt c["CurrentRedshift"] = z_from_t_internal(cosmo, c["CurrentTime"], zinit) dt = CourantFactor*dx/maximum(abs(v)+1e-30) dtFrac = DtFractionOfTime*c["CurrentTime"] dt = min(dt, dtFrac, MaximumDt) - + if ((c["CurrentTime"]+dt) > c["StopTime"] ) dt = (1. + 1e-6)*(c["StopTime"] - c["CurrentTime"]) println("final dt = ", dt) diff --git a/src/SPH.jl b/src/SPH.jl index 109f0b8..49a79b9 100644 --- a/src/SPH.jl +++ b/src/SPH.jl @@ -3,6 +3,7 @@ using NearestNeighbors import NearestNeighbors.check_input +include("ParticleMesh.jl") """Cubic Spline Kernel popular in Smoothed Particles Hydrodynamics (1,2,3D form) call with @@ -11,6 +12,7 @@ import NearestNeighbors.check_input where r is the (scalar) distance and h the smoothing length """ const SPH_default_dim=2 +const GAMMA = 5./3 # default adiabatic index @inline function W{T<:Real}(r::T,h::T;ndim=SPH_default_dim) const norm = [4./3., 40/7pi, 8/pi] @@ -25,14 +27,14 @@ const SPH_default_dim=2 end # test normalization # x = collect(1:10000)/10000 -# sum(SPH.W(x,1.,ndim=1) * diff(x)[1]) # should come out to 1 +# sum(SPH.W(x,1.,ndim=1) * diff(x)[1]) # should come out to 1 # sum(SPH.W(x,1.,ndim=2).* x *diff(x)[1]*2pi) # sum(SPH.W(x,1.,ndim=3).*x.^2 *(diff(x)[1])*4pi) W{T<:Real}(r::Vector{T}, h::T;ndim=SPH_default_dim) = [W(r[i],h,ndim=ndim) for i in eachindex(r)] """ Derivative of cubic spline kernel - + dw ij = dW(r,h, ndim=3) ndim can be 1,2 and 3 @@ -65,7 +67,7 @@ using NearestNeighbors The shortest distance to a point or one of its periodic images is calculated. So - + x = ones(3,4) \n y = zeros(3,4) \n periodic_euclidean(x,y)\n @@ -76,7 +78,7 @@ using NearestNeighbors 0.0 0.0 - Since [0,0,0] is equal to [1,1,1] in a periodic lattice. + Since [0,0,0] is equal to [1,1,1] in a periodic lattice. 
""" function periodic_euclidean(a::AbstractArray, b::AbstractArray) ld = abs(b .- a) @@ -91,7 +93,7 @@ function periodic_euclidean(a::AbstractArray, b::AbstractArray) end res end - + type PeriodicEuclidean <: Metric end " Find the periodic images we may want to check with the tree" @@ -106,7 +108,7 @@ function periodic_images{T <: AbstractFloat}(p::AbstractArray{T}) ni = zeros(p) for m in 1:2^ndim bity = digits(m-1,2,ndim) - for i in 1:ndim + for i in 1:ndim res[i,m] = n[i,bity[i]+1] end end @@ -119,12 +121,12 @@ function findN_NN{T <: AbstractFloat}(tree::KDTree{T}, points::AbstractArray{T}, @assert tree.reordered==false idxs,dists = knn(tree,points, k, true) # returns sorted # if the farthest point si far enoguh to cross box edge also check - # the periodic images within a search radius equal to that. + # the periodic images within a search radius equal to that. if periodic l = 1.*dists[end] npt = periodic_images(points) nid = idxs - for i in 2:size(npt,2) # the first (original point) is already done + for i in 2:size(npt,2) # the first (original point) is already done append!(nid, inrange(tree, npt[:,i], l, false)) end dists = periodic_euclidean(tree.data[:,nid], points) @@ -154,10 +156,27 @@ function compute_smoothing_lengths{T <: AbstractFloat}(h::AbstractArray{T}, nothing end +function initialize_velocities_sinusoidal(x,v) + Npart = size(x,2) + Nx = ParticleDimensions[1] + for i in 1:Npart + v[1,i] = 2 * sin(2pi * x[1,i]) # x velocity gets a sine wave and x is between [0,1] + end + nothing +end + +function UniformParticlesWaveVelocity(gp, gd, p) + initialize_particles_uniform(p["x"]) + initialize_velocities_sinusoidal(p["x"], p["v"]) + + nothing +end + + function compute_SPH_densities_and_h{T <: AbstractFloat}(rho::AbstractArray{T}, h::AbstractArray{T}, tree::KDTree{T}, - m, + m, Nnghb) @assert size(m,1) == 1 ndim = size(tree.data,1) @@ -170,14 +189,14 @@ function compute_SPH_densities_and_h{T <: AbstractFloat}(rho::AbstractArray{T}, rho[i] += m*W(dists[j],h[i],ndim=ndim) end end - + nothing end function compute_SPH_densities{T <: AbstractFloat}(rho::AbstractArray{T}, x::AbstractArray{T}, tree::KDTree{T}, - m, + m, Nnghb) # @assert size(m,1) == 1 # @assert size(tree.data,1) = size(x,1) @@ -191,7 +210,7 @@ function compute_SPH_densities{T <: AbstractFloat}(rho::AbstractArray{T}, rho[i] += m*W(dists[j],h[i],ndim=ndim) end end - + nothing end @@ -200,7 +219,7 @@ function compute_SPH_pressures(P,rho,entropy) # isothermal equation of state hardcoded for now for i in eachindex(rho) - P[i] = entropy[i]*rho[i]^(GAMMA-1) + P[i] = entropy[i]*rho[i]^(GAMMA-1) end end @@ -210,6 +229,7 @@ end function SPH_accelerations_and_update_entropy(a, + v, P, entropy, rho, @@ -217,15 +237,18 @@ function SPH_accelerations_and_update_entropy(a, tree, m, Nnghb, dt ) @assert size(m,1) == 1 + ArtBulkViscConst = 1 + ArtBulkViscConstB = 2 + ndim = size(tree.data,1) np = size(tree.data,2) dx = zeros(ndim) for i in 1:np dtEntropy = 0. id, dists = findN_NN(tree, tree.data[:,i], Nnghb,periodic=true) - + a[:,i] = 0. 
- p_over_rho2_i = P[i]/rho[i]^2 + p_over_rho2_i = P[i]/rho[i]^2 soundspeed_i = soundspeed(P[i], rho[i]) for k in 2:Nnghb j = id[k] @@ -261,7 +284,7 @@ function SPH_accelerations_and_update_entropy(a, hfc = hfc_visc + m*(p_over_rho2_i*dW_i+P[j]/rho[j]^2*dW_j)/r for dim in 1:ndim - a[dim,i] -= hfc*dx[dim] + a[dim,i] -= hfc*dx[dim] end @@ -273,51 +296,76 @@ function SPH_accelerations_and_update_entropy(a, end -function evolveSPH(x,v,P,m,dtin,tfinal; tout=-1) +function evolveSPH(gp, gd, p) + c = conf + + g = gp[1] # only supporting one grid patch for now + x = p["x"] + v = p["v"] + m = p["m"] + rho = gd[1].d["ρD"] + + φ = gd[1].d["Φ"] # potential + acc = zeros((3,g.dim...)) + rt = rfft(φ) # initialize buffer for real-to complex transform - pa = zeros(x) # particle accelerations + Ngb = 32 # hard coded from previous example + Npart = size(x,2) + pa_grav = zeros(x) # particle accelerations due to gravity + pa_sph = zeros(x) # particle accelerations + pa = zeros(x) prho = zeros(Npart) # particle densities - h = zeros(Npart) # + h = zeros(Npart) + P = ones(prho) # TODO: check this is okay + tree = KDTree(x,reorder=false) # construct tree for searching compute_SPH_densities_and_h(prho, h, tree, m, Ngb) entropy = P./prho.^(GAMMA-1) # initialize entropy from Pressure - - dt = dtin - ttime = 0. - while ttime < tfinal + dt = InitialDt + dx = 1. /maximum(g.dim) # smallest dx + + c["CurrentTime"] = c["StartTime"] + c["CurrentCycle"] = c["StartCycle"] + + while c["CurrentTime"] < StopTime && c["CurrentCycle"] < StopCycle make_periodic(x) tree = KDTree(x,reorder=false) # construct tree for searching compute_SPH_densities_and_h(prho, h, tree, m, Ngb) compute_SPH_pressures(P,prho,entropy) - cs = maximum(soundspeed(P,prho)) - vm = maximum(v) + cs = maximum(abs(soundspeed(P,prho))) + vm = maximum(abs(v)) # conservative new timestep - dt = 0.1*minimum(h)/(cs+vm) - if ((ttime+dt) > tfinal ) - dt = 1.0001*(tfinal - ttime) - println("final dt", dt) + dt = CourantFactor*dx/(cs + vm) + dt = minimum([dt, MaximumDt]) + c["CurrentTime"] += dt + if ((c["CurrentTime"]+dt) > c["StopTime"] ) + dt = (1. + 1e-15)*(c["StopTime"] - c["CurrentTime"]) + println("final dt = ", dt) end + c["CurrentCycle"] += 1 println("dt:", dt) - - SPH_accelerations_and_update_entropy(pa, P, entropy, + + SPH_accelerations_and_update_entropy(pa_sph, v, P, entropy, prho, h, tree, m, Ngb, dt) - # LeapFrog step. Some codes combine the two drift steps + deposit(rho,x,m,interpolation=ParticleDepositInterpolation) + rho_mean = mean(rho) + rho -= rho_mean # the total sum of the density needs to be zero for a periodic signal + compute_potential(φ, rho, rt) + compute_acceleration(acc, φ) + interpVecToPoints(pa_grav, acc, x, interpolation=ParticleBackInterpolation) + pa = pa_grav + pa_sph + # LeapFrog step. Some codes combine the two drift steps drift(x,v,dt/2) kick(v,pa,dt) drift(x,v,dt/2) - - ttime += dt end - if debug - println("Evolved to time:", ttime) - end prho, h, P, pa, entropy end @@ -345,7 +393,6 @@ function sph_particle_output(fname, x, v, rho, P; overwrite=false) end -#const GAMMA = 5./3 # default adiabatic index #using PyPlot
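
A minimal usage sketch for the FOF grouping in src/FOF.jl, following its docstring (groups(x, l; minlength) returns an array of index arrays into the point data). It assumes FOF.jl is available as a module named FOF on the load path, as the docstring's FOF.groups call suggests; the Gadfly plotting shown in the docstring is omitted here.

    using FOF                              # assumption: src/FOF.jl defines `module FOF` and is loadable
    x = rand(2, 100)                       # 100 random points in 2D, coordinates in [0,1)
    gps = FOF.groups(x, 0.1, minlength=3)  # groups with at least 3 members at linking length 0.1
    println("found ", length(gps), " groups")
    for (n, g) in enumerate(gps)
        println("group ", n, ": ", length(g), " members, indices ", g)
    end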
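evolvePM, evolvePMCosmology, and the new evolveSPH all advance particles with a drift-kick-drift leapfrog, calling drift(x, v, dt/2), kick(v, pa, dt), drift(x, v, dt/2) each cycle. The drift and kick helpers themselves are not part of this diff; the sketch below is only a guess at minimal versions consistent with those call sites. Periodic wrapping is not needed inside drift because the evolve loops call make_periodic(x) at the top of every cycle.

    # hypothetical helpers matching the call signatures used in the evolve loops
    function drift(x, v, dt)
        # advance positions by the (half-)step dt using the current velocities
        for i in eachindex(x)
            x[i] += v[i]*dt
        end
        nothing
    end

    function kick(v, pa, dt)
        # advance velocities by dt using the accelerations pa interpolated back to the particles
        for i in eachindex(v)
            v[i] += pa[i]*dt
        end
        nothing
    end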
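The normalization check left as comments above the cubic spline kernel W in src/SPH.jl can be run directly; a sketch is below, assuming SPH.jl is loaded as a module named SPH (the comments themselves call SPH.W). Per those comments, each sum should come out close to 1, since the kernel is normalized in 1, 2, and 3 dimensions.

    using SPH                                # assumption: src/SPH.jl defines `module SPH` and is loadable
    r = collect(1:10000)/10000               # radii sampling (0, 1], smoothing length h = 1
    dr = diff(r)[1]
    println(sum(SPH.W(r, 1.0, ndim=1)) * dr)                 # 1D: sum of W dr
    println(sum(SPH.W(r, 1.0, ndim=2) .* r) * dr * 2pi)      # 2D: sum of W 2πr dr
    println(sum(SPH.W(r, 1.0, ndim=3) .* r.^2) * dr * 4pi)   # 3D: sum of W 4πr² dr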