$ sudo apt install build-essential unzip
$ cd /usr/local/src
$ sudo wget https://download.osgeo.org/proj/proj-7.1.1.tar.gz
$ sudo tar -xzvf proj-7.1.1.tar.gz
$ sudo rm proj-7.1.1.tar.gz
$ cd proj-7.1.1
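The excerpt stops before the build itself. Assuming the standard autotools flow for PROJ 7.x (which also needs the SQLite3 development headers, and libtiff for the default grid support), the session would typically continue:

$ sudo apt install sqlite3 libsqlite3-dev libtiff-dev
$ sudo ./configure
$ sudo make
$ sudo make install
$ sudo ldconfig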
from datetime import datetime
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

app = FastAPI()

# See: https://datatracker.ietf.org/doc/html/rfc2324 (HTCPCP/1.0)
@app.middleware("http")
async def bot_detection_middleware(request: Request, call_next):
    is_bot_request = await is_bot(request)
    if datetime.now().month == 4 and datetime.now().day == 1 and is_bot_request:
        return JSONResponse({"message": "No coffee"}, status_code=418)  # RFC 2324 teapot
    elif is_bot_request:
        return JSONResponse({"message": "Too Many Requests"}, status_code=429)
    response = await call_next(request)
    return response
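The is_bot helper is referenced but never shown; here is a minimal sketch, assuming a simple User-Agent heuristic (real bot detection would look at far more than this one header):

import re

_BOT_UA = re.compile(r"bot|crawler|spider|scraper", re.IGNORECASE)

async def is_bot(request: Request) -> bool:
    # Naive heuristic: flag requests whose User-Agent matches common bot keywords
    return bool(_BOT_UA.search(request.headers.get("user-agent", "")))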
// mcp-client.js
export class MCPClient {
  constructor({ eventUrl, rpcUrl }) {
    this.eventUrl = eventUrl;   // SSE endpoint for server-to-client events
    this.rpcUrl = rpcUrl;       // HTTP endpoint for JSON-RPC requests
    this.pending = new Map();   // in-flight requests, keyed by JSON-RPC id
    this.sessionId = null;      // set once the server assigns a session
    this.endpoint = null;       // session-specific endpoint, if the server supplies one
    this.initialized = false;   // true once the initialize handshake completes
    this.initId = null;         // id of the initialize request, so its reply can be recognized
  }
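  // The excerpt stops at the constructor. A hypothetical `send` method
  // (name and wire details are assumptions, not the original API) sketches
  // how `pending` correlates JSON-RPC ids with responses that arrive
  // asynchronously on the event stream:
  send(method, params) {
    const id = crypto.randomUUID();
    return new Promise((resolve, reject) => {
      // Park the resolvers; the event-stream handler would look up the
      // response's id here and settle the promise.
      this.pending.set(id, { resolve, reject });
      fetch(this.rpcUrl, {
        method: "POST",
        headers: { "content-type": "application/json" },
        body: JSON.stringify({ jsonrpc: "2.0", id, method, params }),
      }).catch((err) => {
        this.pending.delete(id);
        reject(err);
      });
    });
  }
}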
require 'digest'
require 'jwt'
require 'securerandom'

class Snowflake
  class Client
    attr_reader :authorization, :expiration
    attr_accessor :account, :region, :user, :private_key_path

    def initialize(attributes = {})
      # Assumed completion: copy each supplied attribute onto its accessor
      attributes.each { |key, value| public_send("#{key}=", value) }
    end
  end
end
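Nothing after initialize survives in the excerpt. Given the requires and the private_key_path accessor, the client presumably performs Snowflake key-pair (JWT) authentication, so here is a hypothetical sketch of that step; the method name and claim layout follow Snowflake's documented JWT format, but none of this is from the original:

require 'openssl'

class Snowflake
  class Client
    # Hypothetical: mint the JWT that Snowflake key-pair auth expects
    def generate_authorization
      key = OpenSSL::PKey::RSA.new(File.read(private_key_path))
      # Snowflake identifies the key by the SHA-256 fingerprint of the DER-encoded public key
      fingerprint = "SHA256:#{Digest::SHA256.base64digest(key.public_key.to_der)}"
      qualified_user = "#{account.upcase}.#{user.upcase}"
      now = Time.now.to_i
      @expiration = now + 3600 # short-lived token: one hour
      payload = {
        iss: "#{qualified_user}.#{fingerprint}",
        sub: qualified_user,
        iat: now,
        exp: @expiration
      }
      @authorization = JWT.encode(payload, key, 'RS256')
    end
  end
end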
// ./spark-shell \
//   --packages org.apache.sedona:sedona-core-3.0_2.12:1.0.1-incubating,org.apache.sedona:sedona-sql-3.0_2.12:1.0.1-incubating,org.locationtech.jts:jts-core:1.18.2,org.datasyslab:geotools-wrapper:geotools-24.1 \
//   --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
//   --conf spark.kryo.registrator=org.apache.sedona.core.serde.SedonaKryoRegistrator
import org.apache.sedona.core.formatMapper.shapefileParser.ShapefileReader
import org.apache.sedona.core.spatialRDD.SpatialRDD
import org.apache.sedona.sql.utils.{Adapter, SedonaSQLRegistrator}
import org.locationtech.jts.geom.{Coordinate, Geometry, GeometryFactory}
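// With the imports in place, Sedona's spatial types and ST_* SQL functions
// still have to be registered on the running session (one call in 1.0.1):
SedonaSQLRegistrator.registerAll(spark)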
// Apache Spark version 3.1.2
// Using Scala version 2.12.10 (OpenJDK 64-Bit Server VM, Java 1.8.0_292)
val casesUrl = "https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_confirmed_usafacts.csv"
val deathsUrl = "https://usafactsstatic.blob.core.windows.net/public/data/covid-19/covid_deaths_usafacts.csv"

def getData(dataUrl: String): org.apache.spark.sql.DataFrame = {
  // Fetch the CSV over HTTP on the driver and split it into lines
  val responseList = scala.io.Source.fromURL(dataUrl).mkString.stripMargin.linesIterator.toList
  // Distribute the lines as a Dataset[String] (spark.implicits._ is pre-imported in spark-shell)
  val csvData = spark.sparkContext.parallelize(responseList).toDS()
  // Assumed completion: the excerpt stops above; parsing the header row into
  // a DataFrame is the natural next step given the declared return type
  spark.read.option("header", "true").csv(csvData)
}
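A hypothetical quick check that the helper works on both feeds:

val casesDf = getData(casesUrl)
val deathsDf = getData(deathsUrl)
casesDf.show(5)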