Merge branch 'dev' into fix-gitactions

Commit 9e7079b9eb by JacqueGM, 2023-08-07 18:23:05 +02:00 (committed via GitHub)
12 changed files with 255 additions and 43 deletions

.github/workflows/container-image.yml (new file, +16)

@@ -0,0 +1,16 @@
name: Container Image

on:
  push:
    branches:
      - dev
      - feature/server_only
  workflow_dispatch:

jobs:
  call-build-and-push:
    name: Call
    uses: CirclesUBI/.github/.github/workflows/build-and-push.yml@main
    with:
      image-name: pathfinder2
    secrets: inherit
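Since the workflow also declares a `workflow_dispatch` trigger, it can be started by hand. A minimal sketch, assuming the GitHub CLI (`gh`) is installed and authenticated against this repository:

```shell
# trigger the container build manually on the dev branch
gh workflow run container-image.yml --ref dev
```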

@@ -1,9 +1,8 @@
name: BuildAndTest

on:
  push:
  pull_request:
-    branches: [ "main" ]
+    branches: [ "dev" ]

env:
  CARGO_TERM_COLOR: always
@@ -23,17 +22,20 @@ jobs:
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }}
+      - name: Setup PATH
+        run: echo ~/.foundry/bin/ >> $GITHUB_PATH
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
-      - name: Build
-        run: cargo build --verbose
-      - name: Download safes
-        run: wget -q -c https://rpc.circlesubi.id/pathfinder-db/capacity_graph.db
-      - name: Run tests
-        run: cargo test --verbose
-      - name: Lint
-        run: cargo clippy --all --all-features -- -D warnings
       - name: Format
         run: cargo fmt --check --verbose
+      - name: Lint
+        run: cargo clippy --all --all-features -- -D warnings
+      - name: Build
+        run: cargo build --verbose
+      - name: Download safes
+        run: wget -q -c https://rpc.circlesubi.id/pathfinder-db/capacity_graph.db
+      - name: Run tests
+        run: cargo test --verbose

.gitignore (+3)

@@ -8,3 +8,6 @@ Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# Capacity graph runtime state
capacity_graph.db

@@ -12,3 +12,4 @@ json = "^0.12.4"
num-bigint = "^0.4.3"
serde = { version = "1.0.149", features = ["serde_derive"] }
serde_json = "1.0.89"
regex = "1.8.1"

Dockerfile (new file, +15)

@@ -0,0 +1,15 @@
FROM rust:latest AS build
WORKDIR /build
COPY . .
RUN cargo install --path .
RUN cargo build --release
FROM rust AS app
WORKDIR /app
COPY --from=build /build/target/release .
RUN chmod +x ./server
ENTRYPOINT ["./server"]
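For reference, a minimal sketch of building and running this image locally (the `pathfinder2` tag and the port are arbitrary choices; arguments after the image name are appended to the ENTRYPOINT, and binding to 0.0.0.0 is needed for the server to be reachable from outside the container):

```shell
docker build -t pathfinder2 .
docker run -p 8080:8080 pathfinder2 0.0.0.0:8080
```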

@@ -1,20 +1,20 @@
-## Pathfinder2
+# Pathfinder2

Pathfinder is a collection of tools related to
computing transitive transfers in the
[CirclesUBI](https://joincircles.net) trust graph.

-### Building
+## Building

-This is a rust project, so assuming `cargo` is installed, `cargo build`
-creates two binaries: The server (default) and the cli.
+This is a rust project, so assuming `cargo` is installed, `cargo build` creates three binaries:
+The `server` (default), the `cli` and the `convert` tool.

-Both need a file that contains the trust graph edges to work.
+All need a file that contains the trust graph edges to work.
A reasonably up to date edge database file can be obtained from
-https://chriseth.github.io/pathfinder2/edges.dat
+https://circlesubi.github.io/pathfinder2/edges.dat
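A minimal sketch of the build step described above (the binary names are the ones this README lists; paths assume a default cargo layout):

```shell
cargo build --release
# the three binaries land in target/release/
ls target/release/server target/release/cli target/release/convert
```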
-#### Using the Server
+### Using the Server
`cargo run --release <ip-address>:<port>` will start a JSON-RPC server listening on the given port.
@@ -29,18 +29,52 @@ Number of worker threads: 4
Size of request queue: 10
-#### Using the CLI
+#### Run with test data

1) Download the [balances and trust binary dump from 2023-05-23](graph_at_20230523_15_00.db)
2) Start the server with `cargo run --release <ip-address>:<port>`
3) Import the data with the curl command below
4) Query the server with the curl command below

-The CLI will load an edge database file and compute the transitive transfers
-from one source to one destination. You can limit the number of hops to explore
-and the maximum amount of circles to transfer.

The data can be imported into a running pathfinder2 server with the following command:
```shell
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{
    "id": "timestamp_value",
    "method": "load_safes_binary",
    "params": {
      "file": "/path/to/graph_at_20230523_15_00.db"
    }
  }' \
  "http://<ip>:<port>"
```
Afterwards, the server can be queried with the following command:
```shell
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{
    "id": "timestamp_value",
    "method": "compute_transfer",
    "params": {
      "from": "0x000...",
      "to": "0x000...",
      "value": 999999999999,
      "iterative": false,
      "prune": true
    }
  }' \
  "http://<ip>:<port>"
```
### Using the CLI
The CLI will load an edge database file and compute the transitive transfers from one source to one destination. You can limit the number of hops to explore and the maximum amount of circles to transfer.
The options are:
`cargo run --release --bin cli <from> <to> <edges.dat> [<max_hops> [<max_amount>]] [--dot <dotfile>]`
-For example
+For example:
`cargo run --release --bin cli 0x9BA1Bcd88E99d6E1E03252A70A63FEa83Bf1208c 0x42cEDde51198D1773590311E2A340DC06B24cB37 edges.dat 3 1000000000000000000`
@@ -48,7 +82,7 @@ Computes a transfer of at most `1000000000000000000`, exploring 3 hops.
If you specify `--dot <dotfile>`, a graphviz/dot representation of the transfer graph is written to the given file.
-#### Conversion Tool
+### Conversion Tool
The conversion tool can convert between different ways of representing the edge and trust relations in the circles system.
All data formats are described in https://hackmd.io/Gg04t7gjQKeDW2Q6Jchp0Q

@@ -15,7 +15,7 @@ query="""{
}""".replace('\n', ' ')
#API='https://graph.circles.garden/subgraphs/name/CirclesUBI/circles-subgraph'
-API='https://api.thegraph.com/subgraphs/name/circlesubi/circles'
+API='https://api.thegraph.com/subgraphs/name/circlesubi/circles-ubi'
lastID = 0

graph_at_20230523_15_00.db (new binary file, not shown)

@@ -6,5 +6,18 @@ fn main() {
    let listen_at = env::args()
        .nth(1)
        .unwrap_or_else(|| "127.0.0.1:8080".to_string());
-    server::start_server(&listen_at, 10, 4);
+    let queue_size = env::args()
+        .nth(2)
+        .unwrap_or_else(|| "10".to_string())
+        .parse::<usize>()
+        .unwrap();
+    let thread_count = env::args()
+        .nth(3)
+        .unwrap_or_else(|| "4".to_string())
+        .parse::<u64>()
+        .unwrap();
+    server::start_server(&listen_at, queue_size, thread_count);
}
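With this change, the request queue size and worker thread count become optional positional arguments after the listen address. A minimal sketch of the new invocation (the values 20 and 8 are arbitrary; omitted arguments fall back to 10 and 4):

```shell
cargo run --release -- 127.0.0.1:8080 20 8
```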

@@ -46,7 +46,7 @@ pub fn read_edges_csv(path: &String) -> Result<EdgeDB, io::Error> {
pub fn write_edges_binary(edges: &EdgeDB, path: &String) -> Result<(), io::Error> {
    let mut file = File::create(path)?;
-    let address_index = write_address_index(&mut file, edges)?;
+    let address_index = write_address_index(&mut file, addresses_from_edges(edges))?;
    write_edges(&mut file, edges, &address_index)
}
@@ -123,6 +123,46 @@ pub fn import_from_safes_binary(path: &str) -> Result<DB, io::Error> {
    Ok(DB::new(safes, token_owner))
}

pub fn export_safes_to_binary(db: &DB, path: &str) -> Result<(), io::Error> {
    let mut file = File::create(path)?;
    let address_index = write_address_index(&mut file, addresses_from_safes(db.safes()))?;

    // organizations
    let organizations = db.safes().iter().filter(|s| s.1.organization);
    write_u32(&mut file, organizations.clone().count() as u32)?;
    for (user, _) in organizations {
        write_address(&mut file, user, &address_index)?;
    }

    // trust edges
    let trust_edges = db.safes().iter().flat_map(|(user, safe)| {
        safe.limit_percentage
            .iter()
            .map(|(other, percentage)| (*user, other, percentage))
    });
    write_u32(&mut file, trust_edges.clone().count() as u32)?;
    for (user, send_to, percentage) in trust_edges {
        write_address(&mut file, &user, &address_index)?;
        write_address(&mut file, send_to, &address_index)?;
        write_u8(&mut file, *percentage)?;
    }

    // balances
    let balances = db.safes().iter().flat_map(|(user, safe)| {
        safe.balances
            .iter()
            .map(|(token_owner, amount)| (*user, token_owner, amount))
    });
    write_u32(&mut file, balances.clone().count() as u32)?;
    for (user, token_owner, amount) in balances {
        write_address(&mut file, &user, &address_index)?;
        write_address(&mut file, token_owner, &address_index)?;
        write_u256(&mut file, amount)?;
    }

    Ok(())
}
fn read_address_index(file: &mut File) -> Result<HashMap<u32, Address>, io::Error> {
    let address_count = read_u32(file)?;
    let mut addresses = HashMap::new();
@@ -134,10 +174,7 @@ fn read_address_index(file: &mut File) -> Result<HashMap<u32, Address>, io::Error> {
    Ok(addresses)
}

-fn write_address_index(
-    file: &mut File,
-    edges: &EdgeDB,
-) -> Result<HashMap<Address, u32>, io::Error> {
+fn addresses_from_edges(edges: &EdgeDB) -> BTreeSet<Address> {
    let mut addresses = BTreeSet::new();
    for Edge {
        from, to, token, ..
@@ -147,6 +184,37 @@ fn write_address_index(
        addresses.insert(*to);
        addresses.insert(*token);
    }
    addresses
}

fn addresses_from_safes(safes: &BTreeMap<Address, Safe>) -> BTreeSet<Address> {
    let mut addresses = BTreeSet::new();
    for (
        user,
        Safe {
            token_address,
            balances,
            limit_percentage,
            organization: _,
        },
    ) in safes
    {
        addresses.insert(*user);
        addresses.insert(*token_address);
        for other in balances.keys() {
            addresses.insert(*other);
        }
        for other in limit_percentage.keys() {
            addresses.insert(*other);
        }
    }
    addresses
}

fn write_address_index(
    file: &mut File,
    addresses: BTreeSet<Address>,
) -> Result<HashMap<Address, u32>, io::Error> {
    write_u32(file, addresses.len() as u32)?;
    let mut index = HashMap::new();
    for (i, addr) in addresses.into_iter().enumerate() {

@@ -21,6 +21,10 @@ impl DB {
        db
    }

    pub fn safes(&self) -> &BTreeMap<Address, Safe> {
        &self.safes
    }

    pub fn edges(&self) -> &EdgeDB {
        &self.edges
    }

@@ -3,11 +3,15 @@ use crate::io::{import_from_safes_binary, read_edges_binary, read_edges_csv};
use crate::types::edge::EdgeDB;
use crate::types::{Address, Edge, U256};
use json::JsonValue;
use num_bigint::BigUint;
use regex::Regex;
use std::error::Error;
use std::fmt::{Debug, Display, Formatter};
use std::io::Read;
use std::io::{BufRead, BufReader, Write};
use std::net::{TcpListener, TcpStream};
use std::ops::Deref;
use std::str::FromStr;
use std::sync::mpsc::TrySendError;
use std::sync::{mpsc, Arc, Mutex, RwLock};
use std::thread;
@@ -18,6 +22,52 @@ struct JsonRpcRequest {
    params: JsonValue,
}
struct InputValidationError(String);

impl Error for InputValidationError {}

impl Debug for InputValidationError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Error: {}", self.0)
    }
}

impl Display for InputValidationError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Error: {}", self.0)
    }
}

fn validate_and_parse_ethereum_address(address: &str) -> Result<Address, Box<dyn Error>> {
    let re = Regex::new(r"^0x[0-9a-fA-F]{40}$").unwrap();
    if re.is_match(address) {
        Ok(Address::from(address))
    } else {
        Err(Box::new(InputValidationError(format!(
            "Invalid Ethereum address: {}",
            address
        ))))
    }
}

fn validate_and_parse_u256(value_str: &str) -> Result<U256, Box<dyn Error>> {
    match BigUint::from_str(value_str) {
        Ok(parsed_value) => {
            if parsed_value > U256::MAX.into() {
                Err(Box::new(InputValidationError(format!(
                    "Value {} is too large. Maximum value is {}.",
                    parsed_value, U256::MAX
                ))))
            } else {
                Ok(U256::from_bigint_truncating(parsed_value))
            }
        }
        Err(e) => Err(Box::new(InputValidationError(format!(
            "Invalid value: {}. Couldn't parse value: {}",
            value_str, e
        )))),
    }
}
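A hedged sketch of what these validators reject, reusing the `compute_transfer` request shape from the README (the malformed `from` value is deliberate; the request should be refused with the `Invalid Ethereum address` error produced above, though the exact response shape depends on the server's error handling):

```shell
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{
    "id": "1",
    "method": "compute_transfer",
    "params": {
      "from": "0xnot-a-valid-address",
      "to": "0x0000000000000000000000000000000000000001",
      "value": "1000",
      "iterative": false,
      "prune": true
    }
  }' \
  "http://<ip>:<port>"
```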
pub fn start_server(listen_at: &str, queue_size: usize, threads: u64) {
    let edges: Arc<RwLock<Arc<EdgeDB>>> = Arc::new(RwLock::new(Arc::new(EdgeDB::default())));
@@ -137,22 +187,28 @@ fn compute_transfer(
    mut socket: TcpStream,
) -> Result<(), Box<dyn Error>> {
    socket.write_all(chunked_header().as_bytes())?;
+    let parsed_value_param = match request.params["value"].as_str() {
+        Some(value_str) => validate_and_parse_u256(value_str)?,
+        None => U256::MAX,
+    };
+    let from_address = validate_and_parse_ethereum_address(&request.params["from"].to_string())?;
+    let to_address = validate_and_parse_ethereum_address(&request.params["to"].to_string())?;
    let max_distances = if request.params["iterative"].as_bool().unwrap_or_default() {
        vec![Some(1), Some(2), None]
    } else {
        vec![None]
    };
    let max_transfers = request.params["max_transfers"].as_u64();
    for max_distance in max_distances {
        let (flow, transfers) = graph::compute_flow(
-            &Address::from(request.params["from"].to_string().as_str()),
-            &Address::from(request.params["to"].to_string().as_str()),
+            &from_address,
+            &to_address,
            edges,
-            if request.params.has_key("value") {
-                U256::from(request.params["value"].to_string().as_str())
-            } else {
-                U256::MAX
-            },
+            parsed_value_param,
            max_distance,
            max_transfers,
        );