Mirror of https://github.com/SagerNet/sing-box.git (synced 2025-06-13 21:54:13 +08:00)

Compare commits: dev-next ... v1.10.0-be (26 commits)
Commits (26, newest first; author and date columns did not survive this mirror view):

100d971218
a8067f6eb8
b48db4778d
a925fff9d0
d21bd8f37b
5bff64ab72
f5ccff0dc8
3428f6dc07
42643c6489
cd2edabba7
5fe026b170
f026b661bf
9219211d4b
e6f2d37e93
06e8ee885d
9dcb96e919
ed3f5c3c41
3986d2aa51
6821954630
aecfff29cb
969f7e02cd
31f98525be
f523029953
791b840919
fd30f5c6c2
c6aedd125b
.github/workflows/debug.yml (vendored, 43 changed lines)

@@ -28,31 +28,10 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: ^1.22
-        continue-on-error: true
+          go-version: ^1.23
       - name: Run Test
         run: |
           go test -v ./...
-  build_go118:
-    name: Debug build (Go 1.18)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4
-        with:
-          fetch-depth: 0
-      - name: Setup Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: ~1.18
-      - name: Cache go module
-        uses: actions/cache@v4
-        with:
-          path: |
-            ~/go/pkg/mod
-          key: go118-${{ hashFiles('**/go.sum') }}
-      - name: Run Test
-        run: make ci_build_go118
   build_go120:
     name: Debug build (Go 1.20)
     runs-on: ubuntu-latest

@@ -93,6 +72,26 @@ jobs:
           key: go121-${{ hashFiles('**/go.sum') }}
       - name: Run Test
         run: make ci_build
+  build_go122:
+    name: Debug build (Go 1.22)
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
+        with:
+          fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ~1.22
+      - name: Cache go module
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/go/pkg/mod
+          key: go122-${{ hashFiles('**/go.sum') }}
+      - name: Run Test
+        run: make ci_build
   cross:
     strategy:
       matrix:
.github/workflows/lint.yml (vendored, 2 changed lines)

@@ -28,7 +28,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: ^1.22
+          go-version: ^1.23
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v6
         with:
.github/workflows/linux.yml (vendored, 2 changed lines)

@@ -16,7 +16,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
-         go-version: ^1.22
+         go-version: ^1.23
      - name: Extract signing key
        run: |-
          mkdir -p $HOME/.gnupg
Dockerfile

@@ -1,4 +1,4 @@
-FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS builder
+FROM --platform=$BUILDPLATFORM golang:1.23-alpine AS builder
 LABEL maintainer="nekohasekai <contact-git@sekai.icu>"
 COPY . /go/src/github.com/sagernet/sing-box
 WORKDIR /go/src/github.com/sagernet/sing-box

@@ -21,7 +21,7 @@ FROM --platform=$TARGETPLATFORM alpine AS dist
 LABEL maintainer="nekohasekai <contact-git@sekai.icu>"
 RUN set -ex \
     && apk upgrade \
-    && apk add bash tzdata ca-certificates \
+    && apk add bash tzdata ca-certificates nftables \
     && rm -rf /var/cache/apk/*
 COPY --from=builder /go/bin/sing-box /usr/local/bin/sing-box
 ENTRYPOINT ["sing-box"]
Makefile (9 changed lines)

@@ -1,7 +1,6 @@
 NAME = sing-box
 COMMIT = $(shell git rev-parse --short HEAD)
-TAGS_GO118 = with_gvisor,with_dhcp,with_wireguard,with_reality_server,with_clash_api
-TAGS_GO120 = with_quic,with_utls
+TAGS_GO120 = with_gvisor,with_dhcp,with_wireguard,with_reality_server,with_clash_api,with_quic,with_utls
 TAGS_GO121 = with_ech
 TAGS ?= $(TAGS_GO118),$(TAGS_GO120),$(TAGS_GO121)
 TAGS_TEST ?= with_gvisor,with_quic,with_wireguard,with_grpc,with_ech,with_utls,with_reality_server

@@ -20,13 +19,9 @@ PREFIX ?= $(shell go env GOPATH)
 build:
 	go build $(MAIN_PARAMS) $(MAIN)

-ci_build_go118:
-	go build $(PARAMS) $(MAIN)
-	go build $(PARAMS) -tags "$(TAGS_GO118)" $(MAIN)
-
 ci_build_go120:
 	go build $(PARAMS) $(MAIN)
-	go build $(PARAMS) -tags "$(TAGS_GO118),$(TAGS_GO120)" $(MAIN)
+	go build $(PARAMS) -tags "$(TAGS_GO120)" $(MAIN)

 ci_build:
 	go build $(PARAMS) $(MAIN)
@@ -4,14 +4,13 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
-	"io"
 	"net"
 	"time"

 	"github.com/sagernet/sing-box/common/urltest"
 	"github.com/sagernet/sing-dns"
 	N "github.com/sagernet/sing/common/network"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 )

 type ClashServer interface {

@@ -56,16 +55,15 @@ func (s *SavedRuleSet) MarshalBinary() ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	err = rw.WriteUVariant(&buffer, uint64(len(s.Content)))
+	err = varbin.Write(&buffer, binary.BigEndian, s.Content)
 	if err != nil {
 		return nil, err
 	}
-	buffer.Write(s.Content)
 	err = binary.Write(&buffer, binary.BigEndian, s.LastUpdated.Unix())
 	if err != nil {
 		return nil, err
 	}
-	err = rw.WriteVString(&buffer, s.LastEtag)
+	err = varbin.Write(&buffer, binary.BigEndian, s.LastEtag)
 	if err != nil {
 		return nil, err
 	}

@@ -79,12 +77,7 @@ func (s *SavedRuleSet) UnmarshalBinary(data []byte) error {
 	if err != nil {
 		return err
 	}
-	contentLen, err := rw.ReadUVariant(reader)
-	if err != nil {
-		return err
-	}
-	s.Content = make([]byte, contentLen)
-	_, err = io.ReadFull(reader, s.Content)
+	err = varbin.Read(reader, binary.BigEndian, &s.Content)
 	if err != nil {
 		return err
 	}

@@ -94,7 +87,7 @@ func (s *SavedRuleSet) UnmarshalBinary(data []byte) error {
 		return err
 	}
 	s.LastUpdated = time.Unix(lastUpdated, 0)
-	s.LastEtag, err = rw.ReadVString(reader)
+	err = varbin.Read(reader, binary.BigEndian, &s.LastEtag)
 	if err != nil {
 		return err
 	}
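Note on the hunks above: the hand-rolled encoding (rw.WriteUVariant plus a raw buffer.Write on the write side, rw.ReadUVariant plus io.ReadFull on the read side) is replaced by the varbin helpers, which length-prefix byte slices and strings themselves. A minimal round-trip sketch, assuming only the varbin.Write/varbin.Read signatures that appear in the diff; the field values are made up for illustration:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/sagernet/sing/common/varbin"
)

func main() {
    // Write a length-prefixed byte slice and string, as SavedRuleSet.MarshalBinary now does.
    var buffer bytes.Buffer
    if err := varbin.Write(&buffer, binary.BigEndian, []byte("rule-set content")); err != nil {
        panic(err)
    }
    if err := varbin.Write(&buffer, binary.BigEndian, "last-etag"); err != nil {
        panic(err)
    }
    // Read the values back in the same order, mirroring UnmarshalBinary.
    reader := bytes.NewReader(buffer.Bytes())
    var content []byte
    var etag string
    if err := varbin.Read(reader, binary.BigEndian, &content); err != nil {
        panic(err)
    }
    if err := varbin.Read(reader, binary.BigEndian, &etag); err != nil {
        panic(err)
    }
    fmt.Println(string(content), etag)
}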
@@ -31,11 +31,16 @@ type InboundContext struct {
 	Network     string
 	Source      M.Socksaddr
 	Destination M.Socksaddr
-	Domain      string
-	Protocol    string
 	User        string
 	Outbound    string

+	// sniffer
+
+	Protocol     string
+	Domain       string
+	Client       string
+	SniffContext any
+
 	// cache

 	InboundDetour string

@@ -51,7 +56,9 @@ type InboundContext struct {

 	// rule cache

 	IPCIDRMatchSource bool
+	IPCIDRAcceptEmpty bool
+
 	SourceAddressMatch      bool
 	SourcePortMatch         bool
 	DestinationAddressMatch bool

@@ -62,6 +69,7 @@ type InboundContext struct {

 func (c *InboundContext) ResetRuleCache() {
 	c.IPCIDRMatchSource = false
+	c.IPCIDRAcceptEmpty = false
 	c.SourceAddressMatch = false
 	c.SourcePortMatch = false
 	c.DestinationAddressMatch = false
@@ -10,15 +10,18 @@ import (
 	"github.com/sagernet/sing-tun"
 	"github.com/sagernet/sing/common/control"
 	N "github.com/sagernet/sing/common/network"
+	"github.com/sagernet/sing/common/x/list"
 	"github.com/sagernet/sing/service"

 	mdns "github.com/miekg/dns"
+	"go4.org/netipx"
 )

 type Router interface {
 	Service
 	PreStarter
 	PostStarter
+	Cleanup() error

 	Outbounds() []Outbound
 	Outbound(tag string) (Outbound, bool)

@@ -45,7 +48,9 @@ type Router interface {
 	DefaultInterface() string
 	AutoDetectInterface() bool
 	AutoDetectInterfaceFunc() control.Func
-	DefaultMark() int
+	DefaultMark() uint32
+	RegisterAutoRedirectOutputMark(mark uint32) error
+	AutoRedirectOutputMark() uint32
 	NetworkMonitor() tun.NetworkUpdateMonitor
 	InterfaceMonitor() tun.DefaultInterfaceMonitor
 	PackageManager() tun.PackageManager

@@ -92,12 +97,22 @@ type DNSRule interface {
 }

 type RuleSet interface {
+	Name() string
 	StartContext(ctx context.Context, startContext RuleSetStartContext) error
+	PostStart() error
 	Metadata() RuleSetMetadata
+	ExtractIPSet() []*netipx.IPSet
+	IncRef()
+	DecRef()
+	Cleanup()
+	RegisterCallback(callback RuleSetUpdateCallback) *list.Element[RuleSetUpdateCallback]
+	UnregisterCallback(element *list.Element[RuleSetUpdateCallback])
 	Close() error
 	HeadlessRule
 }

+type RuleSetUpdateCallback func(it RuleSet)
+
 type RuleSetMetadata struct {
 	ContainsProcessRule bool
 	ContainsWIFIRule    bool
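DefaultMark() now returns uint32, and the router gains RegisterAutoRedirectOutputMark/AutoRedirectOutputMark, which the dialer change at the end of this comparison consumes. A hedged sketch of that consumer pattern, using only identifiers visible in these diffs; the helper name applyOutputMark is made up:

package example

import (
    "net"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing/common/control"
)

// applyOutputMark marks outbound sockets with the registered auto-redirect
// output mark, the same pattern the default dialer change below uses.
func applyOutputMark(router adapter.Router, dialer *net.Dialer) {
    if mark := router.AutoRedirectOutputMark(); mark > 0 {
        dialer.Control = control.Append(dialer.Control, control.RoutingMark(mark))
    }
}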
box.go (29 changed lines)

@@ -111,6 +111,7 @@ func New(options Options) (*Box, error) {
 			ctx,
 			router,
 			logFactory.NewLogger(F.ToString("inbound/", inboundOptions.Type, "[", tag, "]")),
+			tag,
 			inboundOptions,
 			options.PlatformInterface,
 		)

@@ -302,7 +303,11 @@ func (s *Box) start() error {
 			return E.Cause(err, "initialize inbound/", in.Type(), "[", tag, "]")
 		}
 	}
-	return s.postStart()
+	err = s.postStart()
+	if err != nil {
+		return err
+	}
+	return s.router.Cleanup()
 }

 func (s *Box) postStart() error {

@@ -312,16 +317,28 @@ func (s *Box) postStart() error {
 			return E.Cause(err, "start ", serviceName)
 		}
 	}
-	for _, outbound := range s.outbounds {
-		if lateOutbound, isLateOutbound := outbound.(adapter.PostStarter); isLateOutbound {
+	// TODO: reorganize ALL start order
+	for _, out := range s.outbounds {
+		if lateOutbound, isLateOutbound := out.(adapter.PostStarter); isLateOutbound {
 			err := lateOutbound.PostStart()
 			if err != nil {
-				return E.Cause(err, "post-start outbound/", outbound.Tag())
+				return E.Cause(err, "post-start outbound/", out.Tag())
 			}
 		}
 	}
-	return s.router.PostStart()
+	err := s.router.PostStart()
+	if err != nil {
+		return err
+	}
+	for _, in := range s.inbounds {
+		if lateInbound, isLateInbound := in.(adapter.PostStarter); isLateInbound {
+			err = lateInbound.PostStart()
+			if err != nil {
+				return E.Cause(err, "post-start inbound/", in.Tag())
+			}
+		}
+	}
+	return nil
 }

 func (s *Box) Close() error {
@@ -45,7 +45,9 @@ func (s *Box) startOutbounds() error {
 		}
 		started[outboundTag] = true
 		canContinue = true
-		if starter, isStarter := outboundToStart.(common.Starter); isStarter {
+		if starter, isStarter := outboundToStart.(interface {
+			Start() error
+		}); isStarter {
 			monitor.Start("initialize outbound/", outboundToStart.Type(), "[", outboundTag, "]")
 			err := starter.Start()
 			monitor.Finish()
@@ -93,7 +93,7 @@ func buildAndroid() {

 	const name = "libbox.aar"
 	copyPath := filepath.Join("..", "sing-box-for-android", "app", "libs")
-	if rw.FileExists(copyPath) {
+	if rw.IsDir(copyPath) {
 		copyPath, _ = filepath.Abs(copyPath)
 		err = rw.CopyFile(name, filepath.Join(copyPath, name))
 		if err != nil {

@@ -134,7 +134,7 @@ func buildiOS() {
 	}

 	copyPath := filepath.Join("..", "sing-box-for-apple")
-	if rw.FileExists(copyPath) {
+	if rw.IsDir(copyPath) {
 		targetDir := filepath.Join(copyPath, "Libbox.xcframework")
 		targetDir, _ = filepath.Abs(targetDir)
 		os.RemoveAll(targetDir)

@@ -30,7 +30,7 @@ func FindSDK() {
 	}
 	for _, path := range searchPath {
 		path = os.ExpandEnv(path)
-		if rw.FileExists(filepath.Join(path, "licenses", "android-sdk-license")) {
+		if rw.IsFile(filepath.Join(path, "licenses", "android-sdk-license")) {
 			androidSDKPath = path
 			break
 		}

@@ -60,7 +60,7 @@ func FindSDK() {
 func findNDK() bool {
 	const fixedVersion = "26.2.11394342"
 	const versionFile = "source.properties"
-	if fixedPath := filepath.Join(androidSDKPath, "ndk", fixedVersion); rw.FileExists(filepath.Join(fixedPath, versionFile)) {
+	if fixedPath := filepath.Join(androidSDKPath, "ndk", fixedVersion); rw.IsFile(filepath.Join(fixedPath, versionFile)) {
 		androidNDKPath = fixedPath
 		return true
 	}

@@ -86,7 +86,7 @@ func findNDK() bool {
 	})
 	for _, versionName := range versionNames {
 		currentNDKPath := filepath.Join(androidSDKPath, "ndk", versionName)
-		if rw.FileExists(filepath.Join(androidSDKPath, versionFile)) {
+		if rw.IsFile(filepath.Join(androidSDKPath, versionFile)) {
 			androidNDKPath = currentNDKPath
 			log.Warn("reproducibility warning: using NDK version " + versionName + " instead of " + fixedVersion)
 			return true

@@ -100,11 +100,11 @@ var GoBinPath string
 func FindMobile() {
 	goBin := filepath.Join(build.Default.GOPATH, "bin")
 	if runtime.GOOS == "windows" {
-		if !rw.FileExists(filepath.Join(goBin, "gobind.exe")) {
+		if !rw.IsFile(filepath.Join(goBin, "gobind.exe")) {
 			log.Fatal("missing gomobile installation")
 		}
 	} else {
-		if !rw.FileExists(filepath.Join(goBin, "gobind")) {
+		if !rw.IsFile(filepath.Join(goBin, "gobind")) {
 			log.Fatal("missing gomobile installation")
 		}
 	}

@@ -87,7 +87,7 @@ func geoipExport(countryCode string) error {
 		headlessRule.IPCIDR = append(headlessRule.IPCIDR, cidr.String())
 	}
 	var plainRuleSet option.PlainRuleSetCompat
-	plainRuleSet.Version = C.RuleSetVersion1
+	plainRuleSet.Version = C.RuleSetVersion2
 	plainRuleSet.Options.Rules = []option.HeadlessRule{
 		{
 			Type: C.RuleTypeDefault,

@@ -70,7 +70,7 @@ func geositeExport(category string) error {
 	headlessRule.DomainKeyword = defaultRule.DomainKeyword
 	headlessRule.DomainRegex = defaultRule.DomainRegex
 	var plainRuleSet option.PlainRuleSetCompat
-	plainRuleSet.Version = C.RuleSetVersion1
+	plainRuleSet.Version = C.RuleSetVersion2
 	plainRuleSet.Options.Rules = []option.HeadlessRule{
 		{
 			Type: C.RuleTypeDefault,

@@ -54,7 +54,11 @@ func merge(outputPath string) error {
 			return nil
 		}
 	}
-	err = rw.WriteFile(outputPath, buffer.Bytes())
+	err = rw.MkdirParent(outputPath)
+	if err != nil {
+		return err
+	}
+	err = os.WriteFile(outputPath, buffer.Bytes(), 0o644)
 	if err != nil {
 		return err
 	}

@@ -6,7 +6,7 @@ import (

 var commandRuleSet = &cobra.Command{
 	Use:   "rule-set",
-	Short: "Manage rule sets",
+	Short: "Manage rule-sets",
 }

 func init() {

@@ -6,6 +6,7 @@ import (
 	"strings"

 	"github.com/sagernet/sing-box/common/srs"
+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing-box/log"
 	"github.com/sagernet/sing-box/option"
 	"github.com/sagernet/sing/common/json"

@@ -55,10 +56,10 @@ func compileRuleSet(sourcePath string) error {
 	if err != nil {
 		return err
 	}
+	ruleSet, err := plainRuleSet.Upgrade()
 	if err != nil {
 		return err
 	}
-	ruleSet := plainRuleSet.Upgrade()
 	var outputPath string
 	if flagRuleSetCompileOutput == flagRuleSetCompileDefaultOutput {
 		if strings.HasSuffix(sourcePath, ".json") {

@@ -73,7 +74,7 @@ func compileRuleSet(sourcePath string) error {
 	if err != nil {
 		return err
 	}
-	err = srs.Write(outputFile, ruleSet)
+	err = srs.Write(outputFile, ruleSet, plainRuleSet.Version == C.RuleSetVersion2)
 	if err != nil {
 		outputFile.Close()
 		os.Remove(outputPath)
cmd/sing-box/cmd_rule_set_convert.go (new file, 88 lines)

package main

import (
    "io"
    "os"
    "strings"

    "github.com/sagernet/sing-box/cmd/sing-box/internal/convertor/adguard"
    "github.com/sagernet/sing-box/common/srs"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    E "github.com/sagernet/sing/common/exceptions"

    "github.com/spf13/cobra"
)

var (
    flagRuleSetConvertType   string
    flagRuleSetConvertOutput string
)

var commandRuleSetConvert = &cobra.Command{
    Use:   "convert [source-path]",
    Short: "Convert adguard DNS filter to rule-set",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := convertRuleSet(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandRuleSet.AddCommand(commandRuleSetConvert)
    commandRuleSetConvert.Flags().StringVarP(&flagRuleSetConvertType, "type", "t", "", "Source type, available: adguard")
    commandRuleSetConvert.Flags().StringVarP(&flagRuleSetConvertOutput, "output", "o", flagRuleSetCompileDefaultOutput, "Output file")
}

func convertRuleSet(sourcePath string) error {
    var (
        reader io.Reader
        err    error
    )
    if sourcePath == "stdin" {
        reader = os.Stdin
    } else {
        reader, err = os.Open(sourcePath)
        if err != nil {
            return err
        }
    }
    var rules []option.HeadlessRule
    switch flagRuleSetConvertType {
    case "adguard":
        rules, err = adguard.Convert(reader)
    case "":
        return E.New("source type is required")
    default:
        return E.New("unsupported source type: ", flagRuleSetConvertType)
    }
    if err != nil {
        return err
    }
    var outputPath string
    if flagRuleSetConvertOutput == flagRuleSetCompileDefaultOutput {
        if strings.HasSuffix(sourcePath, ".txt") {
            outputPath = sourcePath[:len(sourcePath)-4] + ".srs"
        } else {
            outputPath = sourcePath + ".srs"
        }
    } else {
        outputPath = flagRuleSetConvertOutput
    }
    outputFile, err := os.Create(outputPath)
    if err != nil {
        return err
    }
    defer outputFile.Close()
    err = srs.Write(outputFile, option.PlainRuleSet{Rules: rules}, true)
    if err != nil {
        outputFile.Close()
        os.Remove(outputPath)
        return err
    }
    outputFile.Close()
    return nil
}
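A hedged sketch of driving the same conversion path from code rather than the CLI, using only adguard.Convert and srs.Write as they appear above. Because the convertor lives in an internal package, such a helper would have to sit under cmd/sing-box, and the sample filter content here is made up:

package main

import (
    "os"
    "strings"

    "github.com/sagernet/sing-box/cmd/sing-box/internal/convertor/adguard"
    "github.com/sagernet/sing-box/common/srs"
    "github.com/sagernet/sing-box/option"
)

// writeExampleRuleSet converts a small in-memory AdGuard filter list and
// writes it as a binary rule-set, mirroring what convertRuleSet does for files.
func writeExampleRuleSet(outputPath string) error {
    rules, err := adguard.Convert(strings.NewReader("||example.org^\n@@|sagernet.example.org|\n"))
    if err != nil {
        return err
    }
    outputFile, err := os.Create(outputPath)
    if err != nil {
        return err
    }
    defer outputFile.Close()
    return srs.Write(outputFile, option.PlainRuleSet{Rules: rules}, true)
}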
cmd/sing-box/cmd_rule_set_decompile.go (new file, 83 lines)

package main

import (
    "io"
    "os"
    "strings"

    "github.com/sagernet/sing-box/common/srs"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    "github.com/sagernet/sing/common/json"

    "github.com/spf13/cobra"
)

var flagRuleSetDecompileOutput string

const flagRuleSetDecompileDefaultOutput = "<file_name>.json"

var commandRuleSetDecompile = &cobra.Command{
    Use:   "decompile [binary-path]",
    Short: "Decompile rule-set binary to json",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := decompileRuleSet(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandRuleSet.AddCommand(commandRuleSetDecompile)
    commandRuleSetDecompile.Flags().StringVarP(&flagRuleSetDecompileOutput, "output", "o", flagRuleSetDecompileDefaultOutput, "Output file")
}

func decompileRuleSet(sourcePath string) error {
    var (
        reader io.Reader
        err    error
    )
    if sourcePath == "stdin" {
        reader = os.Stdin
    } else {
        reader, err = os.Open(sourcePath)
        if err != nil {
            return err
        }
    }
    plainRuleSet, err := srs.Read(reader, true)
    if err != nil {
        return err
    }
    ruleSet := option.PlainRuleSetCompat{
        Version: C.RuleSetVersion1,
        Options: plainRuleSet,
    }
    var outputPath string
    if flagRuleSetDecompileOutput == flagRuleSetDecompileDefaultOutput {
        if strings.HasSuffix(sourcePath, ".srs") {
            outputPath = sourcePath[:len(sourcePath)-4] + ".json"
        } else {
            outputPath = sourcePath + ".json"
        }
    } else {
        outputPath = flagRuleSetDecompileOutput
    }
    outputFile, err := os.Create(outputPath)
    if err != nil {
        return err
    }
    encoder := json.NewEncoder(outputFile)
    encoder.SetIndent("", "  ")
    err = encoder.Encode(ruleSet)
    if err != nil {
        outputFile.Close()
        os.Remove(outputPath)
        return err
    }
    outputFile.Close()
    return nil
}
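Together with rule-set compile, the decompile command above makes the binary format round-trippable. A minimal sketch of that round trip, assuming srs.Write accepts any io.Writer and srs.Read any io.Reader, as their uses in these files suggest:

package main

import (
    "bytes"

    "github.com/sagernet/sing-box/common/srs"
    "github.com/sagernet/sing-box/option"
)

// roundTrip writes a plain rule-set in the binary .srs form and reads it back.
func roundTrip(ruleSet option.PlainRuleSet) (option.PlainRuleSet, error) {
    var buffer bytes.Buffer
    err := srs.Write(&buffer, ruleSet, true)
    if err != nil {
        return option.PlainRuleSet{}, err
    }
    return srs.Read(&buffer, true)
}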
@@ -14,6 +14,7 @@ import (
 	E "github.com/sagernet/sing/common/exceptions"
 	F "github.com/sagernet/sing/common/format"
 	"github.com/sagernet/sing/common/json"
+	M "github.com/sagernet/sing/common/metadata"

 	"github.com/spf13/cobra"
 )

@@ -21,8 +22,8 @@ import (
 var flagRuleSetMatchFormat string

 var commandRuleSetMatch = &cobra.Command{
-	Use:   "match <rule-set path> <domain>",
-	Short: "Check if a domain matches the rule set",
+	Use:   "match <rule-set path> <IP address/domain>",
+	Short: "Check if an IP address or a domain matches the rule-set",
 	Args:  cobra.ExactArgs(2),
 	Run: func(cmd *cobra.Command, args []string) {
 		err := ruleSetMatch(args[0], args[1])

@@ -62,14 +63,24 @@ func ruleSetMatch(sourcePath string, domain string) error {
 		if err != nil {
 			return err
 		}
-		plainRuleSet = compat.Upgrade()
+		plainRuleSet, err = compat.Upgrade()
+		if err != nil {
+			return err
+		}
 	case C.RuleSetFormatBinary:
 		plainRuleSet, err = srs.Read(bytes.NewReader(content), false)
 		if err != nil {
 			return err
 		}
 	default:
-		return E.New("unknown rule set format: ", flagRuleSetMatchFormat)
+		return E.New("unknown rule-set format: ", flagRuleSetMatchFormat)
+	}
+	ipAddress := M.ParseAddr(domain)
+	var metadata adapter.InboundContext
+	if ipAddress.IsValid() {
+		metadata.Destination = M.SocksaddrFrom(ipAddress, 0)
+	} else {
+		metadata.Domain = domain
 	}
 	for i, ruleOptions := range plainRuleSet.Rules {
 		var currentRule adapter.HeadlessRule

@@ -77,9 +88,7 @@ func ruleSetMatch(sourcePath string, domain string) error {
 		if err != nil {
 			return E.Cause(err, "parse rule_set.rules.[", i, "]")
 		}
-		if currentRule.Match(&adapter.InboundContext{
-			Domain: domain,
-		}) {
+		if currentRule.Match(&metadata) {
 			println(F.ToString("match rules.[", i, "]: ", currentRule))
 		}
 	}
cmd/sing-box/cmd_rule_set_upgrade.go (new file, 94 lines)

package main

import (
    "bytes"
    "io"
    "os"
    "path/filepath"

    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    E "github.com/sagernet/sing/common/exceptions"
    "github.com/sagernet/sing/common/json"

    "github.com/spf13/cobra"
)

var commandRuleSetUpgradeFlagWrite bool

var commandRuleSetUpgrade = &cobra.Command{
    Use:   "upgrade <source-path>",
    Short: "Upgrade rule-set json",
    Args:  cobra.ExactArgs(1),
    Run: func(cmd *cobra.Command, args []string) {
        err := upgradeRuleSet(args[0])
        if err != nil {
            log.Fatal(err)
        }
    },
}

func init() {
    commandRuleSetUpgrade.Flags().BoolVarP(&commandRuleSetUpgradeFlagWrite, "write", "w", false, "write result to (source) file instead of stdout")
    commandRuleSet.AddCommand(commandRuleSetUpgrade)
}

func upgradeRuleSet(sourcePath string) error {
    var (
        reader io.Reader
        err    error
    )
    if sourcePath == "stdin" {
        reader = os.Stdin
    } else {
        reader, err = os.Open(sourcePath)
        if err != nil {
            return err
        }
    }
    content, err := io.ReadAll(reader)
    if err != nil {
        return err
    }
    plainRuleSetCompat, err := json.UnmarshalExtended[option.PlainRuleSetCompat](content)
    if err != nil {
        return err
    }
    switch plainRuleSetCompat.Version {
    case C.RuleSetVersion1:
    default:
        log.Info("already up-to-date")
        return nil
    }
    plainRuleSet, err := plainRuleSetCompat.Upgrade()
    if err != nil {
        return err
    }
    buffer := new(bytes.Buffer)
    encoder := json.NewEncoder(buffer)
    encoder.SetIndent("", "  ")
    err = encoder.Encode(plainRuleSet)
    if err != nil {
        return E.Cause(err, "encode config")
    }
    outputPath, _ := filepath.Abs(sourcePath)
    if !commandRuleSetUpgradeFlagWrite || sourcePath == "stdin" {
        os.Stdout.WriteString(buffer.String() + "\n")
        return nil
    }
    if bytes.Equal(content, buffer.Bytes()) {
        return nil
    }
    output, err := os.Create(sourcePath)
    if err != nil {
        return E.Cause(err, "open output")
    }
    _, err = output.Write(buffer.Bytes())
    output.Close()
    if err != nil {
        return E.Cause(err, "write output")
    }
    os.Stderr.WriteString(outputPath + "\n")
    return nil
}
@@ -109,7 +109,7 @@ func readConfigAndMerge() (option.Options, error) {
 	}
 	var mergedMessage json.RawMessage
 	for _, options := range optionsList {
-		mergedMessage, err = badjson.MergeJSON(options.options.RawMessage, mergedMessage)
+		mergedMessage, err = badjson.MergeJSON(options.options.RawMessage, mergedMessage, false)
 		if err != nil {
 			return option.Options{}, E.Cause(err, "merge config at ", options.path)
 		}

@@ -188,9 +188,12 @@ func run() error {
 			cancel()
 			closeCtx, closed := context.WithCancel(context.Background())
 			go closeMonitor(closeCtx)
-			instance.Close()
+			err = instance.Close()
 			closed()
 			if osSignal != syscall.SIGHUP {
+				if err != nil {
+					log.Error(E.Cause(err, "sing-box did not closed properly"))
+				}
 				return nil
 			}
 			break
@@ -9,8 +9,10 @@ import (
 	"net/url"
 	"os"

+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing-box/log"
 	"github.com/sagernet/sing/common/bufio"
+	E "github.com/sagernet/sing/common/exceptions"
 	M "github.com/sagernet/sing/common/metadata"

 	"github.com/spf13/cobra"

@@ -32,7 +34,10 @@ func init() {
 	commandTools.AddCommand(commandFetch)
 }

-var httpClient *http.Client
+var (
+	httpClient  *http.Client
+	http3Client *http.Client
+)

 func fetch(args []string) error {
 	instance, err := createPreStartedClient()

@@ -53,8 +58,16 @@ func fetch(args []string) error {
 		},
 	}
 	defer httpClient.CloseIdleConnections()
+	if C.WithQUIC {
+		err = initializeHTTP3Client(instance)
+		if err != nil {
+			return err
+		}
+		defer http3Client.CloseIdleConnections()
+	}
 	for _, urlString := range args {
-		parsedURL, err := url.Parse(urlString)
+		var parsedURL *url.URL
+		parsedURL, err = url.Parse(urlString)
 		if err != nil {
 			return err
 		}

@@ -63,16 +76,27 @@ func fetch(args []string) error {
 			parsedURL.Scheme = "http"
 			fallthrough
 		case "http", "https":
-			err = fetchHTTP(parsedURL)
+			err = fetchHTTP(httpClient, parsedURL)
 			if err != nil {
 				return err
 			}
+		case "http3":
+			if !C.WithQUIC {
+				return C.ErrQUICNotIncluded
+			}
+			parsedURL.Scheme = "https"
+			err = fetchHTTP(http3Client, parsedURL)
+			if err != nil {
+				return err
+			}
+		default:
+			return E.New("unsupported scheme: ", parsedURL.Scheme)
 		}
 	}
 	return nil
 }

-func fetchHTTP(parsedURL *url.URL) error {
+func fetchHTTP(httpClient *http.Client, parsedURL *url.URL) error {
 	request, err := http.NewRequest("GET", parsedURL.String(), nil)
 	if err != nil {
 		return err
cmd/sing-box/cmd_tools_fetch_http3.go (new file, 36 lines)

//go:build with_quic

package main

import (
    "context"
    "crypto/tls"
    "net/http"

    "github.com/sagernet/quic-go"
    "github.com/sagernet/quic-go/http3"
    box "github.com/sagernet/sing-box"
    "github.com/sagernet/sing/common/bufio"
    M "github.com/sagernet/sing/common/metadata"
    N "github.com/sagernet/sing/common/network"
)

func initializeHTTP3Client(instance *box.Box) error {
    dialer, err := createDialer(instance, N.NetworkUDP, commandToolsFlagOutbound)
    if err != nil {
        return err
    }
    http3Client = &http.Client{
        Transport: &http3.RoundTripper{
            Dial: func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
                destination := M.ParseSocksaddr(addr)
                udpConn, dErr := dialer.DialContext(ctx, N.NetworkUDP, destination)
                if dErr != nil {
                    return nil, dErr
                }
                return quic.DialEarly(ctx, bufio.NewUnbindPacketConn(udpConn), udpConn.RemoteAddr(), tlsCfg, cfg)
            },
        },
    }
    return nil
}
cmd/sing-box/cmd_tools_fetch_http3_stub.go (new file, 18 lines)

//go:build !with_quic

package main

import (
    "net/url"
    "os"

    box "github.com/sagernet/sing-box"
)

func initializeHTTP3Client(instance *box.Box) error {
    return os.ErrInvalid
}

func fetchHTTP3(parsedURL *url.URL) error {
    return os.ErrInvalid
}
cmd/sing-box/internal/convertor/adguard/convertor.go (new file, 346 lines)

package adguard

import (
    "bufio"
    "io"
    "net/netip"
    "os"
    "strconv"
    "strings"

    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/log"
    "github.com/sagernet/sing-box/option"
    "github.com/sagernet/sing/common"
    E "github.com/sagernet/sing/common/exceptions"
    M "github.com/sagernet/sing/common/metadata"
)

type agdguardRuleLine struct {
    ruleLine    string
    isRawDomain bool
    isExclude   bool
    isSuffix    bool
    hasStart    bool
    hasEnd      bool
    isRegexp    bool
    isImportant bool
}

func Convert(reader io.Reader) ([]option.HeadlessRule, error) {
    scanner := bufio.NewScanner(reader)
    var (
        ruleLines    []agdguardRuleLine
        ignoredLines int
    )
parseLine:
    for scanner.Scan() {
        ruleLine := scanner.Text()
        if ruleLine == "" || ruleLine[0] == '!' || ruleLine[0] == '#' {
            continue
        }
        originRuleLine := ruleLine
        if M.IsDomainName(ruleLine) {
            ruleLines = append(ruleLines, agdguardRuleLine{
                ruleLine:    ruleLine,
                isRawDomain: true,
            })
            continue
        }
        hostLine, err := parseAdGuardHostLine(ruleLine)
        if err == nil {
            if hostLine != "" {
                ruleLines = append(ruleLines, agdguardRuleLine{
                    ruleLine:    hostLine,
                    isRawDomain: true,
                    hasStart:    true,
                    hasEnd:      true,
                })
            }
            continue
        }
        if strings.HasSuffix(ruleLine, "|") {
            ruleLine = ruleLine[:len(ruleLine)-1]
        }
        var (
            isExclude   bool
            isSuffix    bool
            hasStart    bool
            hasEnd      bool
            isRegexp    bool
            isImportant bool
        )
        if !strings.HasPrefix(ruleLine, "/") && strings.Contains(ruleLine, "$") {
            params := common.SubstringAfter(ruleLine, "$")
            for _, param := range strings.Split(params, ",") {
                paramParts := strings.Split(param, "=")
                var ignored bool
                if len(paramParts) > 0 && len(paramParts) <= 2 {
                    switch paramParts[0] {
                    case "app", "network":
                        // maybe support by package_name/process_name
                    case "dnstype":
                        // maybe support by query_type
                    case "important":
                        ignored = true
                        isImportant = true
                    case "dnsrewrite":
                        if len(paramParts) == 2 && M.ParseAddr(paramParts[1]).IsUnspecified() {
                            ignored = true
                        }
                    }
                }
                if !ignored {
                    ignoredLines++
                    log.Debug("ignored unsupported rule with modifier: ", paramParts[0], ": ", ruleLine)
                    continue parseLine
                }
            }
            ruleLine = common.SubstringBefore(ruleLine, "$")
        }
        if strings.HasPrefix(ruleLine, "@@") {
            ruleLine = ruleLine[2:]
            isExclude = true
        }
        if strings.HasSuffix(ruleLine, "|") {
            ruleLine = ruleLine[:len(ruleLine)-1]
        }
        if strings.HasPrefix(ruleLine, "||") {
            ruleLine = ruleLine[2:]
            isSuffix = true
        } else if strings.HasPrefix(ruleLine, "|") {
            ruleLine = ruleLine[1:]
            hasStart = true
        }
        if strings.HasSuffix(ruleLine, "^") {
            ruleLine = ruleLine[:len(ruleLine)-1]
            hasEnd = true
        }
        if strings.HasPrefix(ruleLine, "/") && strings.HasSuffix(ruleLine, "/") {
            ruleLine = ruleLine[1 : len(ruleLine)-1]
            if ignoreIPCIDRRegexp(ruleLine) {
                ignoredLines++
                log.Debug("ignored unsupported rule with IPCIDR regexp: ", ruleLine)
                continue
            }
            isRegexp = true
        } else {
            if strings.Contains(ruleLine, "://") {
                ruleLine = common.SubstringAfter(ruleLine, "://")
            }
            if strings.Contains(ruleLine, "/") {
                ignoredLines++
                log.Debug("ignored unsupported rule with path: ", ruleLine)
                continue
            }
            if strings.Contains(ruleLine, "##") {
                ignoredLines++
                log.Debug("ignored unsupported rule with element hiding: ", ruleLine)
                continue
            }
            if strings.Contains(ruleLine, "#$#") {
                ignoredLines++
                log.Debug("ignored unsupported rule with element hiding: ", ruleLine)
                continue
            }
            var domainCheck string
            if strings.HasPrefix(ruleLine, ".") || strings.HasPrefix(ruleLine, "-") {
                domainCheck = "r" + ruleLine
            } else {
                domainCheck = ruleLine
            }
            if ruleLine == "" {
                ignoredLines++
                log.Debug("ignored unsupported rule with empty domain", originRuleLine)
                continue
            } else {
                domainCheck = strings.ReplaceAll(domainCheck, "*", "x")
                if !M.IsDomainName(domainCheck) {
                    _, ipErr := parseADGuardIPCIDRLine(ruleLine)
                    if ipErr == nil {
                        ignoredLines++
                        log.Debug("ignored unsupported rule with IPCIDR: ", ruleLine)
                        continue
                    }
                    if M.ParseSocksaddr(domainCheck).Port != 0 {
                        log.Debug("ignored unsupported rule with port: ", ruleLine)
                    } else {
                        log.Debug("ignored unsupported rule with invalid domain: ", ruleLine)
                    }
                    ignoredLines++
                    continue
                }
            }
        }
        ruleLines = append(ruleLines, agdguardRuleLine{
            ruleLine:    ruleLine,
            isExclude:   isExclude,
            isSuffix:    isSuffix,
            hasStart:    hasStart,
            hasEnd:      hasEnd,
            isRegexp:    isRegexp,
            isImportant: isImportant,
        })
    }
    if len(ruleLines) == 0 {
        return nil, E.New("AdGuard rule-set is empty or all rules are unsupported")
    }
    if common.All(ruleLines, func(it agdguardRuleLine) bool {
        return it.isRawDomain
    }) {
        return []option.HeadlessRule{
            {
                Type: C.RuleTypeDefault,
                DefaultOptions: option.DefaultHeadlessRule{
                    Domain: common.Map(ruleLines, func(it agdguardRuleLine) string {
                        return it.ruleLine
                    }),
                },
            },
        }, nil
    }
    mapDomain := func(it agdguardRuleLine) string {
        ruleLine := it.ruleLine
        if it.isSuffix {
            ruleLine = "||" + ruleLine
        } else if it.hasStart {
            ruleLine = "|" + ruleLine
        }
        if it.hasEnd {
            ruleLine += "^"
        }
        return ruleLine
    }

    importantDomain := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return it.isImportant && !it.isRegexp && !it.isExclude }), mapDomain)
    importantDomainRegex := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return it.isImportant && it.isRegexp && !it.isExclude }), mapDomain)
    importantExcludeDomain := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return it.isImportant && !it.isRegexp && it.isExclude }), mapDomain)
    importantExcludeDomainRegex := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return it.isImportant && it.isRegexp && it.isExclude }), mapDomain)
    domain := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return !it.isImportant && !it.isRegexp && !it.isExclude }), mapDomain)
    domainRegex := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return !it.isImportant && it.isRegexp && !it.isExclude }), mapDomain)
    excludeDomain := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return !it.isImportant && !it.isRegexp && it.isExclude }), mapDomain)
    excludeDomainRegex := common.Map(common.Filter(ruleLines, func(it agdguardRuleLine) bool { return !it.isImportant && it.isRegexp && it.isExclude }), mapDomain)
    currentRule := option.HeadlessRule{
        Type: C.RuleTypeDefault,
        DefaultOptions: option.DefaultHeadlessRule{
            AdGuardDomain: domain,
            DomainRegex:   domainRegex,
        },
    }
    if len(excludeDomain) > 0 || len(excludeDomainRegex) > 0 {
        currentRule = option.HeadlessRule{
            Type: C.RuleTypeLogical,
            LogicalOptions: option.LogicalHeadlessRule{
                Mode: C.LogicalTypeAnd,
                Rules: []option.HeadlessRule{
                    {
                        Type: C.RuleTypeDefault,
                        DefaultOptions: option.DefaultHeadlessRule{
                            AdGuardDomain: excludeDomain,
                            DomainRegex:   excludeDomainRegex,
                            Invert:        true,
                        },
                    },
                    currentRule,
                },
            },
        }
    }
    if len(importantDomain) > 0 || len(importantDomainRegex) > 0 {
        currentRule = option.HeadlessRule{
            Type: C.RuleTypeLogical,
            LogicalOptions: option.LogicalHeadlessRule{
                Mode: C.LogicalTypeOr,
                Rules: []option.HeadlessRule{
                    {
                        Type: C.RuleTypeDefault,
                        DefaultOptions: option.DefaultHeadlessRule{
                            AdGuardDomain: importantDomain,
                            DomainRegex:   importantDomainRegex,
                        },
                    },
                    currentRule,
                },
            },
        }
    }
    if len(importantExcludeDomain) > 0 || len(importantExcludeDomainRegex) > 0 {
        currentRule = option.HeadlessRule{
            Type: C.RuleTypeLogical,
            LogicalOptions: option.LogicalHeadlessRule{
                Mode: C.LogicalTypeAnd,
                Rules: []option.HeadlessRule{
                    {
                        Type: C.RuleTypeDefault,
                        DefaultOptions: option.DefaultHeadlessRule{
                            AdGuardDomain: importantExcludeDomain,
                            DomainRegex:   importantExcludeDomainRegex,
                            Invert:        true,
                        },
                    },
                    currentRule,
                },
            },
        }
    }
    log.Info("parsed rules: ", len(ruleLines), "/", len(ruleLines)+ignoredLines)
    return []option.HeadlessRule{currentRule}, nil
}

func ignoreIPCIDRRegexp(ruleLine string) bool {
    if strings.HasPrefix(ruleLine, "(http?:\\/\\/)") {
        ruleLine = ruleLine[12:]
    } else if strings.HasPrefix(ruleLine, "(https?:\\/\\/)") {
        ruleLine = ruleLine[13:]
    } else if strings.HasPrefix(ruleLine, "^") {
        ruleLine = ruleLine[1:]
    } else {
        return false
    }
    _, parseErr := strconv.ParseUint(common.SubstringBefore(ruleLine, "\\."), 10, 8)
    return parseErr == nil
}

func parseAdGuardHostLine(ruleLine string) (string, error) {
    idx := strings.Index(ruleLine, " ")
    if idx == -1 {
        return "", os.ErrInvalid
    }
    address, err := netip.ParseAddr(ruleLine[:idx])
    if err != nil {
        return "", err
    }
    if !address.IsUnspecified() {
        return "", nil
    }
    domain := ruleLine[idx+1:]
    if !M.IsDomainName(domain) {
        return "", E.New("invalid domain name: ", domain)
    }
    return domain, nil
}

func parseADGuardIPCIDRLine(ruleLine string) (netip.Prefix, error) {
    var isPrefix bool
    if strings.HasSuffix(ruleLine, ".") {
        isPrefix = true
        ruleLine = ruleLine[:len(ruleLine)-1]
    }
    ruleStringParts := strings.Split(ruleLine, ".")
    if len(ruleStringParts) > 4 || len(ruleStringParts) < 4 && !isPrefix {
        return netip.Prefix{}, os.ErrInvalid
    }
    ruleParts := make([]uint8, 0, len(ruleStringParts))
    for _, part := range ruleStringParts {
        rulePart, err := strconv.ParseUint(part, 10, 8)
        if err != nil {
            return netip.Prefix{}, err
        }
        ruleParts = append(ruleParts, uint8(rulePart))
    }
    bitLen := len(ruleParts) * 8
    for len(ruleParts) < 4 {
        ruleParts = append(ruleParts, 0)
    }
    return netip.PrefixFrom(netip.AddrFrom4(*(*[4]byte)(ruleParts)), bitLen), nil
}
cmd/sing-box/internal/convertor/adguard/convertor_test.go (new file, 140 lines)

package adguard

import (
    "strings"
    "testing"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing-box/route"

    "github.com/stretchr/testify/require"
)

func TestConverter(t *testing.T) {
    t.Parallel()
    rules, err := Convert(strings.NewReader(`
||example.org^
|example.com^
example.net^
||example.edu
||example.edu.tw^
|example.gov
example.arpa
@@|sagernet.example.org|
||sagernet.org^$important
@@|sing-box.sagernet.org^$important
`))
    require.NoError(t, err)
    require.Len(t, rules, 1)
    rule, err := route.NewHeadlessRule(nil, rules[0])
    require.NoError(t, err)
    matchDomain := []string{
        "example.org",
        "www.example.org",
        "example.com",
        "example.net",
        "isexample.net",
        "www.example.net",
        "example.edu",
        "example.edu.cn",
        "example.edu.tw",
        "www.example.edu",
        "www.example.edu.cn",
        "example.gov",
        "example.gov.cn",
        "example.arpa",
        "www.example.arpa",
        "isexample.arpa",
        "example.arpa.cn",
        "www.example.arpa.cn",
        "isexample.arpa.cn",
        "sagernet.org",
        "www.sagernet.org",
    }
    notMatchDomain := []string{
        "example.org.cn",
        "notexample.org",
        "example.com.cn",
        "www.example.com.cn",
        "example.net.cn",
        "notexample.edu",
        "notexample.edu.cn",
        "www.example.gov",
        "notexample.gov",
        "sagernet.example.org",
        "sing-box.sagernet.org",
    }
    for _, domain := range matchDomain {
        require.True(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
    for _, domain := range notMatchDomain {
        require.False(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
}

func TestHosts(t *testing.T) {
    t.Parallel()
    rules, err := Convert(strings.NewReader(`
127.0.0.1 localhost
::1 localhost #[IPv6]
0.0.0.0 google.com
`))
    require.NoError(t, err)
    require.Len(t, rules, 1)
    rule, err := route.NewHeadlessRule(nil, rules[0])
    require.NoError(t, err)
    matchDomain := []string{
        "google.com",
    }
    notMatchDomain := []string{
        "www.google.com",
        "notgoogle.com",
        "localhost",
    }
    for _, domain := range matchDomain {
        require.True(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
    for _, domain := range notMatchDomain {
        require.False(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
}

func TestSimpleHosts(t *testing.T) {
    t.Parallel()
    rules, err := Convert(strings.NewReader(`
example.com
www.example.org
`))
    require.NoError(t, err)
    require.Len(t, rules, 1)
    rule, err := route.NewHeadlessRule(nil, rules[0])
    require.NoError(t, err)
    matchDomain := []string{
        "example.com",
        "www.example.org",
    }
    notMatchDomain := []string{
        "example.com.cn",
        "www.example.com",
        "notexample.com",
        "example.org",
    }
    for _, domain := range matchDomain {
        require.True(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
    for _, domain := range notMatchDomain {
        require.False(t, rule.Match(&adapter.InboundContext{
            Domain: domain,
        }), domain)
    }
}
@ -50,12 +50,26 @@ func NewDefault(router adapter.Router, options option.DialerOptions) (*DefaultDi
|
|||||||
dialer.Control = control.Append(dialer.Control, bindFunc)
|
dialer.Control = control.Append(dialer.Control, bindFunc)
|
||||||
listener.Control = control.Append(listener.Control, bindFunc)
|
listener.Control = control.Append(listener.Control, bindFunc)
|
||||||
}
|
}
|
||||||
if options.RoutingMark != 0 {
|
var autoRedirectOutputMark uint32
|
||||||
|
if router != nil {
|
||||||
|
autoRedirectOutputMark = router.AutoRedirectOutputMark()
|
||||||
|
}
|
||||||
|
if autoRedirectOutputMark > 0 {
|
||||||
|
dialer.Control = control.Append(dialer.Control, control.RoutingMark(autoRedirectOutputMark))
|
||||||
|
listener.Control = control.Append(listener.Control, control.RoutingMark(autoRedirectOutputMark))
|
||||||
|
}
|
||||||
|
if options.RoutingMark > 0 {
|
||||||
dialer.Control = control.Append(dialer.Control, control.RoutingMark(options.RoutingMark))
|
dialer.Control = control.Append(dialer.Control, control.RoutingMark(options.RoutingMark))
|
||||||
listener.Control = control.Append(listener.Control, control.RoutingMark(options.RoutingMark))
|
listener.Control = control.Append(listener.Control, control.RoutingMark(options.RoutingMark))
|
||||||
} else if router != nil && router.DefaultMark() != 0 {
|
if autoRedirectOutputMark > 0 {
|
||||||
|
return nil, E.New("`auto_redirect` with `route_[_exclude]_address_set` conflicts with `routing_mark`")
|
||||||
|
}
|
||||||
|
} else if router != nil && router.DefaultMark() > 0 {
|
||||||
dialer.Control = control.Append(dialer.Control, control.RoutingMark(router.DefaultMark()))
|
dialer.Control = control.Append(dialer.Control, control.RoutingMark(router.DefaultMark()))
|
||||||
listener.Control = control.Append(listener.Control, control.RoutingMark(router.DefaultMark()))
|
listener.Control = control.Append(listener.Control, control.RoutingMark(router.DefaultMark()))
|
||||||
|
if autoRedirectOutputMark > 0 {
|
||||||
|
return nil, E.New("`auto_redirect` with `route_[_exclude]_address_set` conflicts with `default_mark`")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if options.ReuseAddr {
|
if options.ReuseAddr {
|
||||||
listener.Control = control.Append(listener.Control, control.ReuseAddr())
|
listener.Control = control.Append(listener.Control, control.ReuseAddr())
|
||||||
|
@ -1,17 +1,24 @@
|
|||||||
package geosite
|
package geosite
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
E "github.com/sagernet/sing/common/exceptions"
|
E "github.com/sagernet/sing/common/exceptions"
|
||||||
"github.com/sagernet/sing/common/rw"
|
"github.com/sagernet/sing/common/varbin"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Reader struct {
|
type Reader struct {
|
||||||
reader io.ReadSeeker
|
access sync.Mutex
|
||||||
domainIndex map[string]int
|
reader io.ReadSeeker
|
||||||
domainLength map[string]int
|
bufferedReader *bufio.Reader
|
||||||
|
metadataIndex int64
|
||||||
|
domainIndex map[string]int
|
||||||
|
domainLength map[string]int
|
||||||
}
|
}
|
||||||
|
|
||||||
func Open(path string) (*Reader, []string, error) {
|
func Open(path string) (*Reader, []string, error) {
|
||||||
@ -34,15 +41,23 @@ func Open(path string) (*Reader, []string, error) {
|
|||||||
return reader, codes, nil
|
return reader, codes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type geositeMetadata struct {
|
||||||
|
Code string
|
||||||
|
Index uint64
|
||||||
|
Length uint64
|
||||||
|
}
|
||||||
|
|
||||||
func (r *Reader) readMetadata() error {
|
func (r *Reader) readMetadata() error {
|
||||||
version, err := rw.ReadByte(r.reader)
|
counter := &readCounter{Reader: r.reader}
|
||||||
|
reader := bufio.NewReader(counter)
|
||||||
|
version, err := reader.ReadByte()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if version != 0 {
|
if version != 0 {
|
||||||
return E.New("unknown version")
|
return E.New("unknown version")
|
||||||
}
|
}
|
||||||
entryLength, err := rw.ReadUVariant(r.reader)
|
entryLength, err := binary.ReadUvarint(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -55,16 +70,16 @@ func (r *Reader) readMetadata() error {
|
|||||||
codeIndex uint64
|
codeIndex uint64
|
||||||
codeLength uint64
|
codeLength uint64
|
||||||
)
|
)
|
||||||
code, err = rw.ReadVString(r.reader)
|
code, err = varbin.ReadValue[string](reader, binary.BigEndian)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
keys[i] = code
|
keys[i] = code
|
||||||
codeIndex, err = rw.ReadUVariant(r.reader)
|
codeIndex, err = binary.ReadUvarint(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
codeLength, err = rw.ReadUVariant(r.reader)
|
codeLength, err = binary.ReadUvarint(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -73,6 +88,8 @@ func (r *Reader) readMetadata() error {
|
|||||||
}
|
}
|
||||||
r.domainIndex = domainIndex
|
r.domainIndex = domainIndex
|
||||||
r.domainLength = domainLength
|
r.domainLength = domainLength
|
||||||
|
r.metadataIndex = counter.count - int64(reader.Buffered())
|
||||||
|
r.bufferedReader = reader
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -81,31 +98,32 @@ func (r *Reader) Read(code string) ([]Item, error) {
|
|||||||
if !exists {
|
if !exists {
|
||||||
return nil, E.New("code ", code, " not exists!")
|
return nil, E.New("code ", code, " not exists!")
|
||||||
}
|
}
|
||||||
_, err := r.reader.Seek(int64(index), io.SeekCurrent)
|
_, err := r.reader.Seek(r.metadataIndex+int64(index), io.SeekStart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
counter := &rw.ReadCounter{Reader: r.reader}
|
r.bufferedReader.Reset(r.reader)
|
||||||
domain := make([]Item, r.domainLength[code])
|
itemList := make([]Item, r.domainLength[code])
|
||||||
for i := range domain {
|
err = varbin.Read(r.bufferedReader, binary.BigEndian, &itemList)
|
||||||
var (
|
if err != nil {
|
||||||
item Item
|
return nil, err
|
||||||
err error
|
|
||||||
)
|
|
||||||
item.Type, err = rw.ReadByte(counter)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
item.Value, err = rw.ReadVString(counter)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
domain[i] = item
|
|
||||||
}
|
}
|
||||||
_, err = r.reader.Seek(int64(-index)-counter.Count(), io.SeekCurrent)
|
return itemList, nil
|
||||||
return domain, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Reader) Upstream() any {
|
func (r *Reader) Upstream() any {
|
||||||
return r.reader
|
return r.reader
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type readCounter struct {
|
||||||
|
io.Reader
|
||||||
|
count int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *readCounter) Read(p []byte) (n int, err error) {
|
||||||
|
n, err = r.Reader.Read(p)
|
||||||
|
if n > 0 {
|
||||||
|
atomic.AddInt64(&r.count, int64(n))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
@ -2,13 +2,13 @@ package geosite
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"encoding/binary"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"github.com/sagernet/sing/common/rw"
|
"github.com/sagernet/sing/common/varbin"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Write(writer io.Writer, domains map[string][]Item) error {
|
func Write(writer varbin.Writer, domains map[string][]Item) error {
|
||||||
keys := make([]string, 0, len(domains))
|
keys := make([]string, 0, len(domains))
|
||||||
for code := range domains {
|
for code := range domains {
|
||||||
keys = append(keys, code)
|
keys = append(keys, code)
|
||||||
@ -19,35 +19,32 @@ func Write(writer io.Writer, domains map[string][]Item) error {
|
|||||||
index := make(map[string]int)
|
index := make(map[string]int)
|
||||||
for _, code := range keys {
|
for _, code := range keys {
|
||||||
index[code] = content.Len()
|
index[code] = content.Len()
|
||||||
for _, domain := range domains[code] {
|
err := varbin.Write(content, binary.BigEndian, domains[code])
|
||||||
content.WriteByte(domain.Type)
|
if err != nil {
|
||||||
err := rw.WriteVString(content, domain.Value)
|
return err
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err := rw.WriteByte(writer, 0)
|
err := writer.WriteByte(0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = rw.WriteUVariant(writer, uint64(len(keys)))
|
_, err = varbin.WriteUvarint(writer, uint64(len(keys)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, code := range keys {
|
for _, code := range keys {
|
||||||
err = rw.WriteVString(writer, code)
|
err = varbin.Write(writer, binary.BigEndian, code)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = rw.WriteUVariant(writer, uint64(index[code]))
|
_, err = varbin.WriteUvarint(writer, uint64(index[code]))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = rw.WriteUVariant(writer, uint64(len(domains[code])))
|
_, err = varbin.WriteUvarint(writer, uint64(len(domains[code])))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
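For context on how the reworked geosite Reader and Writer are consumed, here is a hedged usage sketch of opening a compiled database and reading one category. The file name `geosite.db` and the import path `github.com/sagernet/sing-box/common/geosite` are assumptions based on the package name shown in these hunks, not facts taken from this diff.

package main

import (
	"fmt"

	"github.com/sagernet/sing-box/common/geosite"
)

func main() {
	// Open parses the metadata table up front; Read then seeks to the recorded
	// offset for a category code and decodes its items with the buffered reader.
	reader, codes, err := geosite.Open("geosite.db")
	if err != nil {
		panic(err)
	}
	fmt.Println("categories:", len(codes))
	items, err := reader.Read(codes[0])
	if err != nil {
		panic(err)
	}
	fmt.Println(codes[0], "has", len(items), "items")
}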
29  common/ja3/LICENSE  Normal file
@@ -0,0 +1,29 @@
BSD 3-Clause License

Copyright (c) 2018, Open Systems AG
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3  common/ja3/README.md  Normal file
@@ -0,0 +1,3 @@
# JA3

mod from: https://github.com/open-ch/ja3
31  common/ja3/error.go  Normal file
@@ -0,0 +1,31 @@
// Copyright (c) 2018, Open Systems AG. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.

package ja3

import "fmt"

// Error types
const (
	LengthErr        string = "length check %v failed"
	ContentTypeErr   string = "content type not matching"
	VersionErr       string = "version check %v failed"
	HandshakeTypeErr string = "handshake type not matching"
	SNITypeErr       string = "SNI type not supported"
)

// ParseError can be encountered while parsing a segment
type ParseError struct {
	errType string
	check   int
}

func (e *ParseError) Error() string {
	if e.errType == LengthErr || e.errType == VersionErr {
		return fmt.Sprintf(e.errType, e.check)
	}
	return fmt.Sprint(e.errType)
}
83  common/ja3/ja3.go  Normal file
@@ -0,0 +1,83 @@
// Copyright (c) 2018, Open Systems AG. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.

package ja3

import (
	"crypto/md5"
	"encoding/hex"

	"golang.org/x/exp/slices"
)

type ClientHello struct {
	Version             uint16
	CipherSuites        []uint16
	Extensions          []uint16
	EllipticCurves      []uint16
	EllipticCurvePF     []uint8
	Versions            []uint16
	SignatureAlgorithms []uint16
	ServerName          string
	ja3ByteString       []byte
	ja3Hash             string
}

func (j *ClientHello) Equals(another *ClientHello, ignoreExtensionsSequence bool) bool {
	if j.Version != another.Version {
		return false
	}
	if !slices.Equal(j.CipherSuites, another.CipherSuites) {
		return false
	}
	if !ignoreExtensionsSequence && !slices.Equal(j.Extensions, another.Extensions) {
		return false
	}
	if ignoreExtensionsSequence && !slices.Equal(j.Extensions, another.sortedExtensions()) {
		return false
	}
	if !slices.Equal(j.EllipticCurves, another.EllipticCurves) {
		return false
	}
	if !slices.Equal(j.EllipticCurvePF, another.EllipticCurvePF) {
		return false
	}
	if !slices.Equal(j.SignatureAlgorithms, another.SignatureAlgorithms) {
		return false
	}
	return true
}

func (j *ClientHello) sortedExtensions() []uint16 {
	extensions := make([]uint16, len(j.Extensions))
	copy(extensions, j.Extensions)
	slices.Sort(extensions)
	return extensions
}

func Compute(payload []byte) (*ClientHello, error) {
	ja3 := ClientHello{}
	err := ja3.parseSegment(payload)
	return &ja3, err
}

func (j *ClientHello) String() string {
	if j.ja3ByteString == nil {
		j.marshalJA3()
	}
	return string(j.ja3ByteString)
}

func (j *ClientHello) Hash() string {
	if j.ja3ByteString == nil {
		j.marshalJA3()
	}
	if j.ja3Hash == "" {
		h := md5.Sum(j.ja3ByteString)
		j.ja3Hash = hex.EncodeToString(h[:])
	}
	return j.ja3Hash
}
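A hedged usage sketch of the package above: Compute parses a raw TLS record that carries a ClientHello, and String/Hash return the JA3 fingerprint. The `rawClientHello` bytes are a placeholder you would capture off the wire; they are not data from this diff.

package main

import (
	"fmt"

	"github.com/sagernet/sing-box/common/ja3"
)

func main() {
	// rawClientHello is assumed to hold a complete TLS record containing a ClientHello.
	var rawClientHello []byte
	hello, err := ja3.Compute(rawClientHello)
	if err != nil {
		fmt.Println("not a parsable ClientHello:", err)
		return
	}
	fmt.Println("JA3 string:", hello.String())
	fmt.Println("JA3 hash:  ", hello.Hash())
	fmt.Println("SNI:       ", hello.ServerName)
}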
357  common/ja3/parser.go  Normal file
@@ -0,0 +1,357 @@
|
|||||||
|
// Copyright (c) 2018, Open Systems AG. All rights reserved.
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by a BSD-style license
|
||||||
|
// that can be found in the LICENSE file in the root of the source
|
||||||
|
// tree.
|
||||||
|
|
||||||
|
package ja3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Constants used for parsing
|
||||||
|
recordLayerHeaderLen int = 5
|
||||||
|
handshakeHeaderLen int = 6
|
||||||
|
randomDataLen int = 32
|
||||||
|
sessionIDHeaderLen int = 1
|
||||||
|
cipherSuiteHeaderLen int = 2
|
||||||
|
compressMethodHeaderLen int = 1
|
||||||
|
extensionsHeaderLen int = 2
|
||||||
|
extensionHeaderLen int = 4
|
||||||
|
sniExtensionHeaderLen int = 5
|
||||||
|
ecExtensionHeaderLen int = 2
|
||||||
|
ecpfExtensionHeaderLen int = 1
|
||||||
|
versionExtensionHeaderLen int = 1
|
||||||
|
signatureAlgorithmsExtensionHeaderLen int = 2
|
||||||
|
contentType uint8 = 22
|
||||||
|
handshakeType uint8 = 1
|
||||||
|
sniExtensionType uint16 = 0
|
||||||
|
sniNameDNSHostnameType uint8 = 0
|
||||||
|
ecExtensionType uint16 = 10
|
||||||
|
ecpfExtensionType uint16 = 11
|
||||||
|
versionExtensionType uint16 = 43
|
||||||
|
signatureAlgorithmsExtensionType uint16 = 13
|
||||||
|
|
||||||
|
// Versions
|
||||||
|
// The bitmask covers the versions SSL3.0 to TLS1.2
|
||||||
|
tlsVersionBitmask uint16 = 0xFFFC
|
||||||
|
tls13 uint16 = 0x0304
|
||||||
|
|
||||||
|
// GREASE values
|
||||||
|
// The bitmask covers all GREASE values
|
||||||
|
GreaseBitmask uint16 = 0x0F0F
|
||||||
|
|
||||||
|
// Constants used for marshalling
|
||||||
|
dashByte = byte(45)
|
||||||
|
commaByte = byte(44)
|
||||||
|
)
|
||||||
|
|
||||||
|
// parseSegment to populate the corresponding ClientHello object or return an error
|
||||||
|
func (j *ClientHello) parseSegment(segment []byte) error {
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(segment) < recordLayerHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we have "Content Type: Handshake (22)"
|
||||||
|
contType := uint8(segment[0])
|
||||||
|
if contType != contentType {
|
||||||
|
return &ParseError{errType: ContentTypeErr}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if TLS record layer version is supported
|
||||||
|
tlsRecordVersion := uint16(segment[1])<<8 | uint16(segment[2])
|
||||||
|
if tlsRecordVersion&tlsVersionBitmask != 0x0300 && tlsRecordVersion != tls13 {
|
||||||
|
return &ParseError{VersionErr, 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the Handshake is as long as expected from the length field
|
||||||
|
segmentLen := uint16(segment[3])<<8 | uint16(segment[4])
|
||||||
|
if len(segment[recordLayerHeaderLen:]) < int(segmentLen) {
|
||||||
|
return &ParseError{LengthErr, 2}
|
||||||
|
}
|
||||||
|
// Keep the Handshake message, ignore any additional following record types
|
||||||
|
hs := segment[recordLayerHeaderLen : recordLayerHeaderLen+int(segmentLen)]
|
||||||
|
|
||||||
|
err := j.parseHandshake(hs)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseHandshake body
|
||||||
|
func (j *ClientHello) parseHandshake(hs []byte) error {
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(hs) < handshakeHeaderLen+randomDataLen+sessionIDHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 3}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we have "Handshake Type: Client Hello (1)"
|
||||||
|
handshType := uint8(hs[0])
|
||||||
|
if handshType != handshakeType {
|
||||||
|
return &ParseError{errType: HandshakeTypeErr}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if actual length of handshake matches (this is a great exclusion criterion for false positives,
|
||||||
|
// as these fields have to match the actual length of the rest of the segment)
|
||||||
|
handshakeLen := uint32(hs[1])<<16 | uint32(hs[2])<<8 | uint32(hs[3])
|
||||||
|
if len(hs[4:]) != int(handshakeLen) {
|
||||||
|
return &ParseError{LengthErr, 4}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if Client Hello version is supported
|
||||||
|
tlsVersion := uint16(hs[4])<<8 | uint16(hs[5])
|
||||||
|
if tlsVersion&tlsVersionBitmask != 0x0300 && tlsVersion != tls13 {
|
||||||
|
return &ParseError{VersionErr, 2}
|
||||||
|
}
|
||||||
|
j.Version = tlsVersion
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
sessionIDLen := uint8(hs[38])
|
||||||
|
if len(hs) < handshakeHeaderLen+randomDataLen+sessionIDHeaderLen+int(sessionIDLen) {
|
||||||
|
return &ParseError{LengthErr, 5}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cipher Suites
|
||||||
|
cs := hs[handshakeHeaderLen+randomDataLen+sessionIDHeaderLen+int(sessionIDLen):]
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(cs) < cipherSuiteHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 6}
|
||||||
|
}
|
||||||
|
|
||||||
|
csLen := uint16(cs[0])<<8 | uint16(cs[1])
|
||||||
|
numCiphers := int(csLen / 2)
|
||||||
|
cipherSuites := make([]uint16, 0, numCiphers)
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(cs) < cipherSuiteHeaderLen+int(csLen)+compressMethodHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 7}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < numCiphers; i++ {
|
||||||
|
cipherSuite := uint16(cs[2+i<<1])<<8 | uint16(cs[3+i<<1])
|
||||||
|
cipherSuites = append(cipherSuites, cipherSuite)
|
||||||
|
}
|
||||||
|
j.CipherSuites = cipherSuites
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
compressMethodLen := uint16(cs[cipherSuiteHeaderLen+int(csLen)])
|
||||||
|
if len(cs) < cipherSuiteHeaderLen+int(csLen)+compressMethodHeaderLen+int(compressMethodLen) {
|
||||||
|
return &ParseError{LengthErr, 8}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions
|
||||||
|
exs := cs[cipherSuiteHeaderLen+int(csLen)+compressMethodHeaderLen+int(compressMethodLen):]
|
||||||
|
|
||||||
|
err := j.parseExtensions(exs)
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseExtensions of the handshake
|
||||||
|
func (j *ClientHello) parseExtensions(exs []byte) error {
|
||||||
|
// Check for no extensions; this field's header is nonexistent if no body is used
|
||||||
|
if len(exs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(exs) < extensionsHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 9}
|
||||||
|
}
|
||||||
|
|
||||||
|
exsLen := uint16(exs[0])<<8 | uint16(exs[1])
|
||||||
|
exs = exs[extensionsHeaderLen:]
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(exs) < int(exsLen) {
|
||||||
|
return &ParseError{LengthErr, 10}
|
||||||
|
}
|
||||||
|
|
||||||
|
var sni []byte
|
||||||
|
var extensions, ellipticCurves []uint16
|
||||||
|
var ellipticCurvePF []uint8
|
||||||
|
var versions []uint16
|
||||||
|
var signatureAlgorithms []uint16
|
||||||
|
for len(exs) > 0 {
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(exs) < extensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 11}
|
||||||
|
}
|
||||||
|
|
||||||
|
exType := uint16(exs[0])<<8 | uint16(exs[1])
|
||||||
|
exLen := uint16(exs[2])<<8 | uint16(exs[3])
|
||||||
|
// Ignore any GREASE extensions
|
||||||
|
extensions = append(extensions, exType)
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(exs) < extensionHeaderLen+int(exLen) {
|
||||||
|
return &ParseError{LengthErr, 12}
|
||||||
|
}
|
||||||
|
|
||||||
|
sex := exs[extensionHeaderLen : extensionHeaderLen+int(exLen)]
|
||||||
|
|
||||||
|
switch exType {
|
||||||
|
case sniExtensionType: // Extensions: server_name
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) < sniExtensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 13}
|
||||||
|
}
|
||||||
|
|
||||||
|
sniType := uint8(sex[2])
|
||||||
|
sniLen := uint16(sex[3])<<8 | uint16(sex[4])
|
||||||
|
sex = sex[sniExtensionHeaderLen:]
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) != int(sniLen) {
|
||||||
|
return &ParseError{LengthErr, 14}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch sniType {
|
||||||
|
case sniNameDNSHostnameType:
|
||||||
|
sni = sex
|
||||||
|
default:
|
||||||
|
return &ParseError{errType: SNITypeErr}
|
||||||
|
}
|
||||||
|
case ecExtensionType: // Extensions: supported_groups
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) < ecExtensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 15}
|
||||||
|
}
|
||||||
|
|
||||||
|
ecsLen := uint16(sex[0])<<8 | uint16(sex[1])
|
||||||
|
numCurves := int(ecsLen / 2)
|
||||||
|
ellipticCurves = make([]uint16, 0, numCurves)
|
||||||
|
sex = sex[ecExtensionHeaderLen:]
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) != int(ecsLen) {
|
||||||
|
return &ParseError{LengthErr, 16}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < numCurves; i++ {
|
||||||
|
ecType := uint16(sex[i*2])<<8 | uint16(sex[1+i*2])
|
||||||
|
ellipticCurves = append(ellipticCurves, ecType)
|
||||||
|
}
|
||||||
|
|
||||||
|
case ecpfExtensionType: // Extensions: ec_point_formats
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) < ecpfExtensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 17}
|
||||||
|
}
|
||||||
|
|
||||||
|
ecpfsLen := uint8(sex[0])
|
||||||
|
numPF := int(ecpfsLen)
|
||||||
|
ellipticCurvePF = make([]uint8, numPF)
|
||||||
|
sex = sex[ecpfExtensionHeaderLen:]
|
||||||
|
|
||||||
|
// Check if we can decode the next fields
|
||||||
|
if len(sex) != numPF {
|
||||||
|
return &ParseError{LengthErr, 18}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < numPF; i++ {
|
||||||
|
ellipticCurvePF[i] = uint8(sex[i])
|
||||||
|
}
|
||||||
|
case versionExtensionType:
|
||||||
|
if len(sex) < versionExtensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 19}
|
||||||
|
}
|
||||||
|
versionsLen := int(sex[0])
|
||||||
|
for i := 0; i < versionsLen; i += 2 {
|
||||||
|
versions = append(versions, binary.BigEndian.Uint16(sex[1:][i:]))
|
||||||
|
}
|
||||||
|
case signatureAlgorithmsExtensionType:
|
||||||
|
if len(sex) < signatureAlgorithmsExtensionHeaderLen {
|
||||||
|
return &ParseError{LengthErr, 20}
|
||||||
|
}
|
||||||
|
ssaLen := binary.BigEndian.Uint16(sex)
|
||||||
|
for i := 0; i < int(ssaLen); i += 2 {
|
||||||
|
signatureAlgorithms = append(signatureAlgorithms, binary.BigEndian.Uint16(sex[2:][i:]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
exs = exs[4+exLen:]
|
||||||
|
}
|
||||||
|
j.ServerName = string(sni)
|
||||||
|
j.Extensions = extensions
|
||||||
|
j.EllipticCurves = ellipticCurves
|
||||||
|
j.EllipticCurvePF = ellipticCurvePF
|
||||||
|
j.Versions = versions
|
||||||
|
j.SignatureAlgorithms = signatureAlgorithms
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalJA3 into a byte string
|
||||||
|
func (j *ClientHello) marshalJA3() {
|
||||||
|
// A uint16 can contain numbers with up to 5 digits and a uint8 can contain numbers with up to 3 digits, but we
|
||||||
|
// also need a byte for each separating character, except at the end.
|
||||||
|
byteStringLen := 6*(1+len(j.CipherSuites)+len(j.Extensions)+len(j.EllipticCurves)) + 4*len(j.EllipticCurvePF) - 1
|
||||||
|
byteString := make([]byte, 0, byteStringLen)
|
||||||
|
|
||||||
|
// Version
|
||||||
|
byteString = strconv.AppendUint(byteString, uint64(j.Version), 10)
|
||||||
|
byteString = append(byteString, commaByte)
|
||||||
|
|
||||||
|
// Cipher Suites
|
||||||
|
if len(j.CipherSuites) != 0 {
|
||||||
|
for _, val := range j.CipherSuites {
|
||||||
|
if val&GreaseBitmask != 0x0A0A {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
byteString = strconv.AppendUint(byteString, uint64(val), 10)
|
||||||
|
byteString = append(byteString, dashByte)
|
||||||
|
}
|
||||||
|
// Replace last dash with a comma
|
||||||
|
byteString[len(byteString)-1] = commaByte
|
||||||
|
} else {
|
||||||
|
byteString = append(byteString, commaByte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions
|
||||||
|
if len(j.Extensions) != 0 {
|
||||||
|
for _, val := range j.Extensions {
|
||||||
|
if val&GreaseBitmask != 0x0A0A {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
byteString = strconv.AppendUint(byteString, uint64(val), 10)
|
||||||
|
byteString = append(byteString, dashByte)
|
||||||
|
}
|
||||||
|
// Replace last dash with a comma
|
||||||
|
byteString[len(byteString)-1] = commaByte
|
||||||
|
} else {
|
||||||
|
byteString = append(byteString, commaByte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Elliptic curves
|
||||||
|
if len(j.EllipticCurves) != 0 {
|
||||||
|
for _, val := range j.EllipticCurves {
|
||||||
|
if val&GreaseBitmask != 0x0A0A {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
byteString = strconv.AppendUint(byteString, uint64(val), 10)
|
||||||
|
byteString = append(byteString, dashByte)
|
||||||
|
}
|
||||||
|
// Replace last dash with a comma
|
||||||
|
byteString[len(byteString)-1] = commaByte
|
||||||
|
} else {
|
||||||
|
byteString = append(byteString, commaByte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ECPF
|
||||||
|
if len(j.EllipticCurvePF) != 0 {
|
||||||
|
for _, val := range j.EllipticCurvePF {
|
||||||
|
byteString = strconv.AppendUint(byteString, uint64(val), 10)
|
||||||
|
byteString = append(byteString, dashByte)
|
||||||
|
}
|
||||||
|
// Remove last dash
|
||||||
|
byteString = byteString[:len(byteString)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
j.ja3ByteString = byteString
|
||||||
|
}
|
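To make the marshalJA3 output above concrete: the fingerprint is the decimal fields `TLSVersion,Ciphers,Extensions,EllipticCurves,EllipticCurvePointFormats`, comma-separated, with dash-separated values inside each list and GREASE values skipped, and the JA3 hash is the MD5 of that string. A small standalone sketch with made-up, illustrative values:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// Illustrative JA3 string: version, ciphers, extensions, curves, point formats.
	// The concrete values below are not taken from a real capture.
	ja3String := "771,4865-4866-4867,0-10-11-13-43,29-23-24,0"
	sum := md5.Sum([]byte(ja3String))
	fmt.Println(hex.EncodeToString(sum[:])) // the JA3 hash of that string
}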
99  common/sniff/bittorrent.go  Normal file
@@ -0,0 +1,99 @@
package sniff

import (
	"bytes"
	"context"
	"encoding/binary"
	"io"
	"os"

	"github.com/sagernet/sing-box/adapter"
	C "github.com/sagernet/sing-box/constant"
)

const (
	trackerConnectFlag    = 0
	trackerProtocolID     = 0x41727101980
	trackerConnectMinSize = 16
)

// BitTorrent detects if the stream is a BitTorrent connection.
// For the BitTorrent protocol specification, see https://www.bittorrent.org/beps/bep_0003.html
func BitTorrent(_ context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
	var first byte
	err := binary.Read(reader, binary.BigEndian, &first)
	if err != nil {
		return err
	}
	if first != 19 {
		return os.ErrInvalid
	}
	var protocol [19]byte
	_, err = reader.Read(protocol[:])
	if err != nil {
		return err
	}
	if string(protocol[:]) != "BitTorrent protocol" {
		return os.ErrInvalid
	}
	metadata.Protocol = C.ProtocolBitTorrent
	return nil
}

// UTP detects if the packet is a uTP connection packet.
// For the uTP protocol specification, see
// 1. https://www.bittorrent.org/beps/bep_0029.html
// 2. https://github.com/bittorrent/libutp/blob/2b364cbb0650bdab64a5de2abb4518f9f228ec44/utp_internal.cpp#L112
func UTP(_ context.Context, metadata *adapter.InboundContext, packet []byte) error {
	// A valid uTP packet must be at least 20 bytes long.
	if len(packet) < 20 {
		return os.ErrInvalid
	}
	version := packet[0] & 0x0F
	ty := packet[0] >> 4
	if version != 1 || ty > 4 {
		return os.ErrInvalid
	}
	// Validate the extensions
	extension := packet[1]
	reader := bytes.NewReader(packet[20:])
	for extension != 0 {
		err := binary.Read(reader, binary.BigEndian, &extension)
		if err != nil {
			return err
		}
		var length byte
		err = binary.Read(reader, binary.BigEndian, &length)
		if err != nil {
			return err
		}
		_, err = reader.Seek(int64(length), io.SeekCurrent)
		if err != nil {
			return err
		}
	}
	metadata.Protocol = C.ProtocolBitTorrent
	return nil
}

// UDPTracker detects if the packet is a UDP Tracker Protocol packet.
// For the UDP Tracker Protocol specification, see https://www.bittorrent.org/beps/bep_0015.html
func UDPTracker(_ context.Context, metadata *adapter.InboundContext, packet []byte) error {
	if len(packet) < trackerConnectMinSize {
		return os.ErrInvalid
	}
	if binary.BigEndian.Uint64(packet[:8]) != trackerProtocolID {
		return os.ErrInvalid
	}
	if binary.BigEndian.Uint32(packet[8:12]) != trackerConnectFlag {
		return os.ErrInvalid
	}
	metadata.Protocol = C.ProtocolBitTorrent
	return nil
}
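The UDPTracker check above keys on the fixed connect-request layout from BEP 15: an 8-byte protocol ID of 0x41727101980, a 4-byte action of 0 (connect), and a 4-byte transaction ID, for a 16-byte minimum. The sketch below builds such a packet; with transaction ID 0x78e90560 it reproduces the first sample in the test file that follows.

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// BEP 15 connect request: protocol ID (8 bytes), action (4 bytes, 0 = connect),
	// transaction ID (4 bytes).
	packet := make([]byte, 16)
	binary.BigEndian.PutUint64(packet[0:8], 0x41727101980)
	binary.BigEndian.PutUint32(packet[8:12], 0)
	binary.BigEndian.PutUint32(packet[12:16], 0x78e90560)
	fmt.Println(hex.EncodeToString(packet)) // 00000417271019800000000078e90560
}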
73  common/sniff/bittorrent_test.go  Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package sniff_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/hex"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/sagernet/sing-box/adapter"
|
||||||
|
"github.com/sagernet/sing-box/common/sniff"
|
||||||
|
C "github.com/sagernet/sing-box/constant"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSniffBittorrent(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
packets := []string{
|
||||||
|
"13426974546f7272656e742070726f746f636f6c0000000000100000e21ea9569b69bab33c97851d0298bdfa89bc90922d5554313631302dea812fcd6a3563e3be40c1d1",
|
||||||
|
"13426974546f7272656e742070726f746f636f6c00000000001000052aa4f5a7e209e54b32803d43670971c4c8caaa052d5452333030302d653369733079647675763638",
|
||||||
|
"13426974546f7272656e742070726f746f636f6c00000000001000052aa4f5a7e209e54b32803d43670971c4c8caaa052d5452343035302d6f7a316c6e79377931716130",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pkt := range packets {
|
||||||
|
pkt, err := hex.DecodeString(pkt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
var metadata adapter.InboundContext
|
||||||
|
err = sniff.BitTorrent(context.TODO(), &metadata, bytes.NewReader(pkt))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, C.ProtocolBitTorrent, metadata.Protocol)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSniffUTP(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
packets := []string{
|
||||||
|
"010041a282d7ee7b583afb160004000006d8318da776968f92d666f7963f32dae23ba0d2c810d8b8209cc4939f54fde9eeaa521c2c20c9ba7f43f4fb0375f28de06643b5e3ca4685ab7ac76adca99783be72ef05ed59ef4234f5712b75b4c7c0d7bee8fe2ca20ad626ba5bb0ffcc16bf06790896f888048cf72716419a07db1a3dca4550fbcea75b53e97235168a221cf3e553dfbb723961bd719fab038d86e0ecb74747f5a2cd669de1c4b9ad375f3a492d09d98cdfad745435625401315bbba98d35d32086299801377b93495a63a9efddb8d05f5b37a5c5b1c0a25e917f12007bb5e05013ada8aff544fab8cadf61d80ddb0b60f12741e44515a109d144fd53ef845acb4b5ccf0d6fc302d7003d76df3fc3423bb0237301c9e88f900c2d392a8e0fdb36d143cf7527a93fd0a2638b746e72f6699fffcd4fd15348fce780d4caa04382fd9faf1ca0ae377ca805da7536662b84f5ee18dd3ae38fcb095a7543e55f9069ae92c8cf54ae44e97b558d35e2545c66601ed2149cbc32bd6df199a2be7cf0da8b2ff137e0d23e776bc87248425013876d3a3cc31a83b424b752bd0346437f24b532978005d8f5b1b0be1a37a2489c32a18a9ad3118e3f9d30eb299bffae18e1f0677c2a5c185e62519093fe6bc2b7339299ea50a587989f726ca6443a75dd5bb936f6367c6355d80fae53ff529d740b2e5576e3eefdf1fdbfc69c3c8d8ac750512635de63e054bee1d3b689bc1b2bc3d2601e42a00b5c89066d173d4ae7ffedfd2274e5cf6d868fbe640aedb69b8246142f00b32d459974287537ddd5373460dcbc92f5cfdd7a3ed6020822ae922d947893752ca1983d0d32977374c384ac8f5ab566859019b7351526b9f13e932037a55bb052d9deb3b3c23317e0784fdc51a64f2159bfea3b069cf5caf02ee2c3c1a6b6b427bb16165713e8802d95b5c8ed77953690e994bd38c9ae113fedaf6ee7fc2b96c032ceafc2a530ad0422e84546b9c6ad8ef6ea02fa508abddd1805c38a7b42e9b7c971b1b636865ebec06ed754bb404cd6b4e6cc8cb77bd4a0c43410d5cd5ef8fe853a66d49b3b9e06cb141236cdbfdd5761601dc54d1250b86c660e0f898fe62526fdd9acf0eab60a3bbbb2151970461f28f10b31689594bea646c4b03ee197d63bdef4e5a7c22716b3bb9494a83b78ecd81b338b80ac6c09c43485b1b09ba41c74343832c78f0520c1d659ac9eb1502094141e82fb9e5e620970ebc0655514c43c294a7714cbf9a499d277daf089f556398a01589a77494bec8bfb60a108f3813b55368672b88c1af40f6b3c8b513f7c70c3e0efce85228b8b9ec67ba0393f9f7305024d8e2da6a26cf85613d14f249170ce1000089df4c9c260df7f8292aa2ecb5d5bac97656d59aa248caedea2d198e51ce87baece338716d114b458de02d65c9ff808ca5b5b73723b4d1e962d9ac2d98176544dc9984cf8554d07820ef3dd0861cfe57b478328046380de589adad94ee44743ffac73bb7361feca5d56f07cf8ce75080e261282ae30350d7882679b15cab9e7e53ddf93310b33f7390ae5d318bb53f387e6af5d0ef4f947fc9cb8e7e38b52c7f8d772ece6156b38d88796ea19df02c53723b44df7c76315a0de9462f27287e682d2b4cda1a68fe00d7e48c51ee981be44e1ca940fb5190c12655edb4a83c3a4f33e48a015692df4f0b3d61656e362aca657b5ae8c12db5a0db3db1e45135ee918b66918f40e53c4f83e9da0cddfe63f736ae751ab3837a30ae3220d8e8e311487093a7b90c7e7e40dd54ca750e19452f9193aa892aa6a6229ab493dadae988b1724f7898ee69c36d3eb7364c4adbeca811cfe2065873e78c2b6dfdf1595f7a7831c07e03cda82e4f86f76438dfb2b07c13638ce7b509cfa71b88b5102b39a203b423202088e1c2103319cb32c13c1e546ff8612fa194c95a7808ab767c265a1bd5fa0efed5c8ec1701876a00ec8",
|
||||||
|
"01001ecb68176f215d04326300100000dbcf30292d14b54e9ee2d115ee5b8ebc7fad3e882d4fcdd0c14c6b917c11cb4c6a9f410b52a33ae97c2ac77c7a2b122b8955e09af3c5c595f1b2e79ca57cfe44c44e069610773b9bc9ba223d7f6b383e3adddd03fb88a8476028e30979c2ef321ffc97c5c132bcf9ac5b410bbb5ec6cefca3c7209202a14c5ae922b6b157b0a80249d13ffe5b996af0bc8e54ba576d148372494303e7ead0602b05b9c8fc97d48508a028a04d63a1fd28b0edfcd5c51715f63188b53eefede98a76912dca98518551a8856567307a56a702cbfcc115ea0c755b418bc2c7b57721239b82f09fb24328a4b0ce0f109bcb2a64e04b8aadb1f8487585425acdf8fc4ec8ea93cfcec5ac098bb29d42ddef6e46b03f34a5de28316726699b7cb5195c33e5c48abe87d591d63f9991c84c30819d186d6e0e95fd83c8dff07aa669c4430989bcaccfeacb9bcadbdb4d8f1964dbeb9687745656edd30b21c66cc0a1d742a78717d134a19a7f02d285a4973b1a198c00cfdff4676608dc4f3e817e3463c3b4e2c80d3e8d4fbac541a58a2fb7ad6939f607f8144eff6c8b0adc28ee5609ea158987519892fb",
|
||||||
|
"21001ecb6817f2805d044fd700100000dbd03029",
|
||||||
|
"410277ef0b1fb1f60000000000040000c233000000080000000000000000",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pkt := range packets {
|
||||||
|
pkt, err := hex.DecodeString(pkt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
var metadata adapter.InboundContext
|
||||||
|
err = sniff.UTP(context.TODO(), &metadata, pkt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, C.ProtocolBitTorrent, metadata.Protocol)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSniffUDPTracker(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
connectPackets := []string{
|
||||||
|
"00000417271019800000000078e90560",
|
||||||
|
"00000417271019800000000022c5d64d",
|
||||||
|
"000004172710198000000000b3863541",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, pkt := range connectPackets {
|
||||||
|
pkt, err := hex.DecodeString(pkt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var metadata adapter.InboundContext
|
||||||
|
err = sniff.UDPTracker(context.TODO(), &metadata, pkt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, C.ProtocolBitTorrent, metadata.Protocol)
|
||||||
|
}
|
||||||
|
}
|
@ -17,18 +17,17 @@ import (
|
|||||||
mDNS "github.com/miekg/dns"
|
mDNS "github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
func StreamDomainNameQuery(readCtx context.Context, reader io.Reader) (*adapter.InboundContext, error) {
|
func StreamDomainNameQuery(readCtx context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
|
||||||
var length uint16
|
var length uint16
|
||||||
err := binary.Read(reader, binary.BigEndian, &length)
|
err := binary.Read(reader, binary.BigEndian, &length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return os.ErrInvalid
|
||||||
}
|
}
|
||||||
if length == 0 {
|
if length == 0 {
|
||||||
return nil, os.ErrInvalid
|
return os.ErrInvalid
|
||||||
}
|
}
|
||||||
buffer := buf.NewSize(int(length))
|
buffer := buf.NewSize(int(length))
|
||||||
defer buffer.Release()
|
defer buffer.Release()
|
||||||
|
|
||||||
readCtx, cancel := context.WithTimeout(readCtx, time.Millisecond*100)
|
readCtx, cancel := context.WithTimeout(readCtx, time.Millisecond*100)
|
||||||
var readTask task.Group
|
var readTask task.Group
|
||||||
readTask.Append0(func(ctx context.Context) error {
|
readTask.Append0(func(ctx context.Context) error {
|
||||||
@ -37,19 +36,20 @@ func StreamDomainNameQuery(readCtx context.Context, reader io.Reader) (*adapter.
|
|||||||
err = readTask.Run(readCtx)
|
err = readTask.Run(readCtx)
|
||||||
cancel()
|
cancel()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
return DomainNameQuery(readCtx, buffer.Bytes())
|
return DomainNameQuery(readCtx, metadata, buffer.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
func DomainNameQuery(ctx context.Context, packet []byte) (*adapter.InboundContext, error) {
|
func DomainNameQuery(ctx context.Context, metadata *adapter.InboundContext, packet []byte) error {
|
||||||
var msg mDNS.Msg
|
var msg mDNS.Msg
|
||||||
err := msg.Unpack(packet)
|
err := msg.Unpack(packet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if len(msg.Question) == 0 || msg.Question[0].Qclass != mDNS.ClassINET || !M.IsDomainName(msg.Question[0].Name) {
|
if len(msg.Question) == 0 || msg.Question[0].Qclass != mDNS.ClassINET || !M.IsDomainName(msg.Question[0].Name) {
|
||||||
return nil, os.ErrInvalid
|
return os.ErrInvalid
|
||||||
}
|
}
|
||||||
return &adapter.InboundContext{Protocol: C.ProtocolDNS}, nil
|
metadata.Protocol = C.ProtocolDNS
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
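Since StreamDomainNameQuery and DomainNameQuery now write into a caller-supplied InboundContext instead of returning a new one, here is a hedged sketch of the new call shape; the query built with miekg/dns is only for illustration, in the proxy the payload comes off the wire.

package main

import (
	"context"
	"fmt"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing-box/common/sniff"

	mDNS "github.com/miekg/dns"
)

func main() {
	// Build a plain A query as a stand-in for sniffed UDP payload.
	var msg mDNS.Msg
	msg.SetQuestion("example.org.", mDNS.TypeA)
	packet, _ := msg.Pack()

	var metadata adapter.InboundContext
	if err := sniff.DomainNameQuery(context.Background(), &metadata, packet); err != nil {
		fmt.Println("not DNS:", err)
		return
	}
	fmt.Println("sniffed protocol:", metadata.Protocol) // the DNS protocol constant
}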
32  common/sniff/dtls.go  Normal file
@@ -0,0 +1,32 @@
package sniff

import (
	"context"
	"os"

	"github.com/sagernet/sing-box/adapter"
	C "github.com/sagernet/sing-box/constant"
)

func DTLSRecord(ctx context.Context, metadata *adapter.InboundContext, packet []byte) error {
	const fixedHeaderSize = 13
	if len(packet) < fixedHeaderSize {
		return os.ErrInvalid
	}
	contentType := packet[0]
	switch contentType {
	case 20, 21, 22, 23, 25:
	default:
		return os.ErrInvalid
	}
	versionMajor := packet[1]
	if versionMajor != 0xfe {
		return os.ErrInvalid
	}
	versionMinor := packet[2]
	if versionMinor != 0xff && versionMinor != 0xfd {
		return os.ErrInvalid
	}
	metadata.Protocol = C.ProtocolDTLS
	return nil
}
33  common/sniff/dtls_test.go  Normal file
@@ -0,0 +1,33 @@
package sniff_test

import (
	"context"
	"encoding/hex"
	"testing"

	"github.com/sagernet/sing-box/adapter"
	"github.com/sagernet/sing-box/common/sniff"
	C "github.com/sagernet/sing-box/constant"

	"github.com/stretchr/testify/require"
)

func TestSniffDTLSClientHello(t *testing.T) {
	t.Parallel()
	packet, err := hex.DecodeString("16fefd0000000000000000007e010000720000000000000072fefd668a43523798e064bd806d0c87660de9c611a59bbdfc3892c4e072d94f2cafc40000000cc02bc02fc00ac014c02cc0300100003c000d0010000e0403050306030401050106010807ff01000100000a00080006001d00170018000b00020100000e000900060008000700010000170000")
	require.NoError(t, err)
	var metadata adapter.InboundContext
	err = sniff.DTLSRecord(context.Background(), &metadata, packet)
	require.NoError(t, err)
	require.Equal(t, metadata.Protocol, C.ProtocolDTLS)
}

func TestSniffDTLSClientApplicationData(t *testing.T) {
	t.Parallel()
	packet, err := hex.DecodeString("17fefd000100000000000100440001000000000001a4f682b77ecadd10f3f3a2f78d90566212366ff8209fd77314f5a49352f9bb9bd12f4daba0b4736ae29e46b9714d3b424b3e6d0234736619b5aa0d3f")
	require.NoError(t, err)
	var metadata adapter.InboundContext
	err = sniff.DTLSRecord(context.Background(), &metadata, packet)
	require.NoError(t, err)
	require.Equal(t, metadata.Protocol, C.ProtocolDTLS)
}
@ -11,10 +11,12 @@ import (
|
|||||||
"github.com/sagernet/sing/protocol/http"
|
"github.com/sagernet/sing/protocol/http"
|
||||||
)
|
)
|
||||||
|
|
||||||
func HTTPHost(ctx context.Context, reader io.Reader) (*adapter.InboundContext, error) {
|
func HTTPHost(_ context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
|
||||||
request, err := http.ReadRequest(std_bufio.NewReader(reader))
|
request, err := http.ReadRequest(std_bufio.NewReader(reader))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
return &adapter.InboundContext{Protocol: C.ProtocolHTTP, Domain: M.ParseSocksaddr(request.Host).AddrString()}, nil
|
metadata.Protocol = C.ProtocolHTTP
|
||||||
|
metadata.Domain = M.ParseSocksaddr(request.Host).AddrString()
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -5,6 +5,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/sagernet/sing-box/adapter"
|
||||||
"github.com/sagernet/sing-box/common/sniff"
|
"github.com/sagernet/sing-box/common/sniff"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
@ -13,7 +14,8 @@ import (
|
|||||||
func TestSniffHTTP1(t *testing.T) {
|
func TestSniffHTTP1(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
pkt := "GET / HTTP/1.1\r\nHost: www.google.com\r\nAccept: */*\r\n\r\n"
|
pkt := "GET / HTTP/1.1\r\nHost: www.google.com\r\nAccept: */*\r\n\r\n"
|
||||||
metadata, err := sniff.HTTPHost(context.Background(), strings.NewReader(pkt))
|
var metadata adapter.InboundContext
|
||||||
|
err := sniff.HTTPHost(context.Background(), &metadata, strings.NewReader(pkt))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, metadata.Domain, "www.google.com")
|
require.Equal(t, metadata.Domain, "www.google.com")
|
||||||
}
|
}
|
||||||
@ -21,7 +23,8 @@ func TestSniffHTTP1(t *testing.T) {
|
|||||||
func TestSniffHTTP1WithPort(t *testing.T) {
|
func TestSniffHTTP1WithPort(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
pkt := "GET / HTTP/1.1\r\nHost: www.gov.cn:8080\r\nAccept: */*\r\n\r\n"
|
pkt := "GET / HTTP/1.1\r\nHost: www.gov.cn:8080\r\nAccept: */*\r\n\r\n"
|
||||||
metadata, err := sniff.HTTPHost(context.Background(), strings.NewReader(pkt))
|
var metadata adapter.InboundContext
|
||||||
|
err := sniff.HTTPHost(context.Background(), &metadata, strings.NewReader(pkt))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, metadata.Domain, "www.gov.cn")
|
require.Equal(t, metadata.Domain, "www.gov.cn")
|
||||||
}
|
}
|
||||||
|
@ -5,95 +5,99 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"crypto"
|
"crypto"
|
||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
|
"crypto/tls"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/sagernet/sing-box/adapter"
|
"github.com/sagernet/sing-box/adapter"
|
||||||
|
"github.com/sagernet/sing-box/common/ja3"
|
||||||
"github.com/sagernet/sing-box/common/sniff/internal/qtls"
|
"github.com/sagernet/sing-box/common/sniff/internal/qtls"
|
||||||
C "github.com/sagernet/sing-box/constant"
|
C "github.com/sagernet/sing-box/constant"
|
||||||
|
"github.com/sagernet/sing/common/buf"
|
||||||
E "github.com/sagernet/sing/common/exceptions"
|
E "github.com/sagernet/sing/common/exceptions"
|
||||||
|
|
||||||
"golang.org/x/crypto/hkdf"
|
"golang.org/x/crypto/hkdf"
|
||||||
)
|
)
|
||||||
|
|
||||||
func QUICClientHello(ctx context.Context, packet []byte) (*adapter.InboundContext, error) {
|
var ErrClientHelloFragmented = E.New("need more packet for chromium QUIC connection")
|
||||||
reader := bytes.NewReader(packet)
|
|
||||||
|
|
||||||
|
func QUICClientHello(ctx context.Context, metadata *adapter.InboundContext, packet []byte) error {
|
||||||
|
reader := bytes.NewReader(packet)
|
||||||
typeByte, err := reader.ReadByte()
|
typeByte, err := reader.ReadByte()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if typeByte&0x40 == 0 {
|
if typeByte&0x40 == 0 {
|
||||||
return nil, E.New("bad type byte")
|
return E.New("bad type byte")
|
||||||
}
|
}
|
||||||
var versionNumber uint32
|
var versionNumber uint32
|
||||||
err = binary.Read(reader, binary.BigEndian, &versionNumber)
|
err = binary.Read(reader, binary.BigEndian, &versionNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if versionNumber != qtls.VersionDraft29 && versionNumber != qtls.Version1 && versionNumber != qtls.Version2 {
|
if versionNumber != qtls.VersionDraft29 && versionNumber != qtls.Version1 && versionNumber != qtls.Version2 {
|
||||||
return nil, E.New("bad version")
|
return E.New("bad version")
|
||||||
}
|
}
|
||||||
packetType := (typeByte & 0x30) >> 4
|
packetType := (typeByte & 0x30) >> 4
|
||||||
if packetType == 0 && versionNumber == qtls.Version2 || packetType == 2 && versionNumber != qtls.Version2 || packetType > 2 {
|
if packetType == 0 && versionNumber == qtls.Version2 || packetType == 2 && versionNumber != qtls.Version2 || packetType > 2 {
|
||||||
return nil, E.New("bad packet type")
|
return E.New("bad packet type")
|
||||||
}
|
}
|
||||||
|
|
||||||
destConnIDLen, err := reader.ReadByte()
|
destConnIDLen, err := reader.ReadByte()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if destConnIDLen == 0 || destConnIDLen > 20 {
|
if destConnIDLen == 0 || destConnIDLen > 20 {
|
||||||
return nil, E.New("bad destination connection id length")
|
return E.New("bad destination connection id length")
|
||||||
}
|
}
|
||||||
|
|
||||||
destConnID := make([]byte, destConnIDLen)
|
destConnID := make([]byte, destConnIDLen)
|
||||||
_, err = io.ReadFull(reader, destConnID)
|
_, err = io.ReadFull(reader, destConnID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
srcConnIDLen, err := reader.ReadByte()
|
srcConnIDLen, err := reader.ReadByte()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
			return nil, err
			return err
		}

		_, err = io.CopyN(io.Discard, reader, int64(srcConnIDLen))
		if err != nil {
			return nil, err
			return err
		}

		tokenLen, err := qtls.ReadUvarint(reader)
		if err != nil {
			return nil, err
			return err
		}

		_, err = io.CopyN(io.Discard, reader, int64(tokenLen))
		if err != nil {
			return nil, err
			return err
		}

		packetLen, err := qtls.ReadUvarint(reader)
		if err != nil {
			return nil, err
			return err
		}

		hdrLen := int(reader.Size()) - reader.Len()
		if hdrLen+int(packetLen) > len(packet) {
			return nil, os.ErrInvalid
			return os.ErrInvalid
		}

		_, err = io.CopyN(io.Discard, reader, 4)
		if err != nil {
			return nil, err
			return err
		}

		pnBytes := make([]byte, aes.BlockSize)
		_, err = io.ReadFull(reader, pnBytes)
		if err != nil {
			return nil, err
			return err
		}

		var salt []byte
@ -117,7 +121,7 @@ func QUICClientHello(ctx context.Context, packet []byte) (*adapter.InboundContex
		hpKey := qtls.HKDFExpandLabel(crypto.SHA256, secret, []byte{}, hkdfHeaderProtectionLabel, 16)
		block, err := aes.NewCipher(hpKey)
		if err != nil {
			return nil, err
			return err
		}
		mask := make([]byte, aes.BlockSize)
		block.Encrypt(mask, pnBytes)
@ -129,7 +133,7 @@ func QUICClientHello(ctx context.Context, packet []byte) (*adapter.InboundContex
		}
		packetNumberLength := newPacket[0]&0x3 + 1
		if hdrLen+int(packetNumberLength) > int(packetLen)+hdrLen {
			return nil, os.ErrInvalid
			return os.ErrInvalid
		}
		var packetNumber uint32
		switch packetNumberLength {
@ -142,7 +146,7 @@ func QUICClientHello(ctx context.Context, packet []byte) (*adapter.InboundContex
		case 4:
			packetNumber = binary.BigEndian.Uint32(newPacket[hdrLen:])
		default:
			return nil, E.New("bad packet number length")
			return E.New("bad packet number length")
		}
		extHdrLen := hdrLen + int(packetNumberLength)
		copy(newPacket[extHdrLen:hdrLen+4], packet[extHdrLen:])
@ -166,138 +170,208 @@ func QUICClientHello(ctx context.Context, packet []byte) (*adapter.InboundContex
		binary.BigEndian.PutUint64(nonce[len(nonce)-8:], uint64(packetNumber))
		decrypted, err := cipher.Open(newPacket[extHdrLen:extHdrLen], nonce, data, newPacket[:extHdrLen])
		if err != nil {
			return nil, err
			return err
		}
		var frameType byte
		var frameLen uint64
		var fragments []struct {
			offset  uint64
			length  uint64
			payload []byte
		}
		var fragments []qCryptoFragment
		decryptedReader := bytes.NewReader(decrypted)
		const (
			frameTypePadding         = 0x00
			frameTypePing            = 0x01
			frameTypeAck             = 0x02
			frameTypeAck2            = 0x03
			frameTypeCrypto          = 0x06
			frameTypeConnectionClose = 0x1c
		)
		var frameTypeList []uint8
		for {
			frameType, err = decryptedReader.ReadByte()
			if err == io.EOF {
				break
			}
			frameTypeList = append(frameTypeList, frameType)
			switch frameType {
			case 0x00: // PADDING
			case frameTypePadding:
				continue
			case 0x01: // PING
			case frameTypePing:
				continue
			case 0x02, 0x03: // ACK
			case frameTypeAck, frameTypeAck2:
				_, err = qtls.ReadUvarint(decryptedReader) // Largest Acknowledged
				if err != nil {
					return nil, err
					return err
				}
				_, err = qtls.ReadUvarint(decryptedReader) // ACK Delay
				if err != nil {
					return nil, err
					return err
				}
				ackRangeCount, err := qtls.ReadUvarint(decryptedReader) // ACK Range Count
				if err != nil {
					return nil, err
					return err
				}
				_, err = qtls.ReadUvarint(decryptedReader) // First ACK Range
				if err != nil {
					return nil, err
					return err
				}
				for i := 0; i < int(ackRangeCount); i++ {
					_, err = qtls.ReadUvarint(decryptedReader) // Gap
					if err != nil {
						return nil, err
						return err
					}
					_, err = qtls.ReadUvarint(decryptedReader) // ACK Range Length
					if err != nil {
						return nil, err
						return err
					}
				}
				if frameType == 0x03 {
					_, err = qtls.ReadUvarint(decryptedReader) // ECT0 Count
					if err != nil {
						return nil, err
						return err
					}
					_, err = qtls.ReadUvarint(decryptedReader) // ECT1 Count
					if err != nil {
						return nil, err
						return err
					}
					_, err = qtls.ReadUvarint(decryptedReader) // ECN-CE Count
					if err != nil {
						return nil, err
						return err
					}
				}
			case 0x06: // CRYPTO
			case frameTypeCrypto:
				var offset uint64
				offset, err = qtls.ReadUvarint(decryptedReader)
				if err != nil {
					return &adapter.InboundContext{Protocol: C.ProtocolQUIC}, err
					return err
				}
				var length uint64
				length, err = qtls.ReadUvarint(decryptedReader)
				if err != nil {
					return &adapter.InboundContext{Protocol: C.ProtocolQUIC}, err
					return err
				}
				index := len(decrypted) - decryptedReader.Len()
				fragments = append(fragments, struct {
					offset  uint64
					length  uint64
					payload []byte
				}{offset, length, decrypted[index : index+int(length)]})
				frameLen += length
				fragments = append(fragments, qCryptoFragment{offset, length, decrypted[index : index+int(length)]})
				_, err = decryptedReader.Seek(int64(length), io.SeekCurrent)
				if err != nil {
					return nil, err
					return err
				}
			case 0x1c: // CONNECTION_CLOSE
			case frameTypeConnectionClose:
				_, err = qtls.ReadUvarint(decryptedReader) // Error Code
				if err != nil {
					return nil, err
					return err
				}
				_, err = qtls.ReadUvarint(decryptedReader) // Frame Type
				if err != nil {
					return nil, err
					return err
				}
				var length uint64
				length, err = qtls.ReadUvarint(decryptedReader) // Reason Phrase Length
				if err != nil {
					return nil, err
					return err
				}
				_, err = decryptedReader.Seek(int64(length), io.SeekCurrent) // Reason Phrase
				if err != nil {
					return nil, err
					return err
				}
			default:
				return nil, os.ErrInvalid
				return os.ErrInvalid
			}
		}
		tlsHdr := make([]byte, 5)
		tlsHdr[0] = 0x16
		binary.BigEndian.PutUint16(tlsHdr[1:], uint16(0x0303))
		binary.BigEndian.PutUint16(tlsHdr[3:], uint16(frameLen))
		if metadata.SniffContext != nil {
			fragments = append(fragments, metadata.SniffContext.([]qCryptoFragment)...)
			metadata.SniffContext = nil
		}
		var frameLen uint64
		for _, fragment := range fragments {
			frameLen += fragment.length
		}
		buffer := buf.NewSize(5 + int(frameLen))
		defer buffer.Release()
		buffer.WriteByte(0x16)
		binary.Write(buffer, binary.BigEndian, uint16(0x0303))
		binary.Write(buffer, binary.BigEndian, uint16(frameLen))
		var index uint64
		var length int
		var readers []io.Reader
		readers = append(readers, bytes.NewReader(tlsHdr))
	find:
		for {
			for _, fragment := range fragments {
				if fragment.offset == index {
					readers = append(readers, bytes.NewReader(fragment.payload))
					buffer.Write(fragment.payload)
					index = fragment.offset + fragment.length
					length++
					continue find
				}
			}
			if length == len(fragments) {
				break
			}
			return &adapter.InboundContext{Protocol: C.ProtocolQUIC}, E.New("bad fragments")
		}
		metadata, err := TLSClientHello(ctx, io.MultiReader(readers...))
		if err != nil {
			return &adapter.InboundContext{Protocol: C.ProtocolQUIC}, err
			break
		}
		metadata.Protocol = C.ProtocolQUIC
		return metadata, nil
		fingerprint, err := ja3.Compute(buffer.Bytes())
		if err != nil {
			metadata.Protocol = C.ProtocolQUIC
			metadata.Client = C.ClientChromium
			metadata.SniffContext = fragments
			return ErrClientHelloFragmented
		}
		metadata.Domain = fingerprint.ServerName
		for metadata.Client == "" {
			if len(frameTypeList) == 1 {
				metadata.Client = C.ClientFirefox
				break
			}
			if frameTypeList[0] == frameTypeCrypto && isZero(frameTypeList[1:]) {
				if len(fingerprint.Versions) == 2 && fingerprint.Versions[0]&ja3.GreaseBitmask == 0x0A0A &&
					len(fingerprint.EllipticCurves) == 5 && fingerprint.EllipticCurves[0]&ja3.GreaseBitmask == 0x0A0A {
					metadata.Client = C.ClientSafari
					break
				}
				if len(fingerprint.CipherSuites) == 1 && fingerprint.CipherSuites[0] == tls.TLS_AES_256_GCM_SHA384 &&
					len(fingerprint.EllipticCurves) == 1 && fingerprint.EllipticCurves[0] == uint16(tls.X25519) &&
					len(fingerprint.SignatureAlgorithms) == 1 && fingerprint.SignatureAlgorithms[0] == uint16(tls.ECDSAWithP256AndSHA256) {
					metadata.Client = C.ClientSafari
					break
				}
			}

			if frameTypeList[len(frameTypeList)-1] == frameTypeCrypto && isZero(frameTypeList[:len(frameTypeList)-1]) {
				metadata.Client = C.ClientQUICGo
				break
			}

			if count(frameTypeList, frameTypeCrypto) > 1 || count(frameTypeList, frameTypePing) > 0 {
				if maybeUQUIC(fingerprint) {
					metadata.Client = C.ClientQUICGo
				} else {
					metadata.Client = C.ClientChromium
				}
				break
			}

			metadata.Client = C.ClientUnknown
			//nolint:staticcheck
			break
		}
		return nil
	}

	func isZero(slices []uint8) bool {
		for _, slice := range slices {
			if slice != 0 {
				return false
			}
		}
		return true
	}

	func count(slices []uint8, value uint8) int {
		var times int
		for _, slice := range slices {
			if slice == value {
				times++
			}
		}
		return times
	}

	type qCryptoFragment struct {
		offset  uint64
		length  uint64
		payload []byte
	}
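A note on the new flow, not part of the diff itself: QUICClientHello now collects the CRYPTO fragments of a split Initial flight into metadata.SniffContext and reports ErrClientHelloFragmented until a full ClientHello can be fingerprinted. A minimal, hypothetical sketch of driving it over several captured UDP payloads from one flow (only the names visible in this diff are assumed to exist; "payloads" is illustrative):

    // Sketch only: feed successive Initial packets until reassembly succeeds.
    var metadata adapter.InboundContext
    for _, payload := range payloads {
        err := sniff.QUICClientHello(context.Background(), &metadata, payload)
        if err == nil {
            break // metadata.Protocol, Domain and Client are now populated
        }
        if errors.Is(err, sniff.ErrClientHelloFragmented) {
            continue // partial CRYPTO data is carried in metadata.SniffContext
        }
        break // not a parseable QUIC Initial
    }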
24  common/sniff/quic_blacklist.go  (new file)
@ -0,0 +1,24 @@
package sniff

import (
    "crypto/tls"

    "github.com/sagernet/sing-box/common/ja3"
)

// Chromium sends separate client hello packets, but UQUIC has not yet implemented this behavior
// The cronet without this behavior does not have version 115
var uQUICChrome115 = &ja3.ClientHello{
    Version:             tls.VersionTLS12,
    CipherSuites:        []uint16{4865, 4866, 4867},
    Extensions:          []uint16{0, 10, 13, 16, 27, 43, 45, 51, 57, 17513},
    EllipticCurves:      []uint16{29, 23, 24},
    SignatureAlgorithms: []uint16{1027, 2052, 1025, 1283, 2053, 1281, 2054, 1537, 513},
}

func maybeUQUIC(fingerprint *ja3.ClientHello) bool {
    if uQUICChrome115.Equals(fingerprint, true) {
        return true
    }
    return false
}
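For illustration only, a hedged sketch of how the blacklist above is consulted after fingerprinting the reassembled hello (helloBytes is a hypothetical input; ja3.Compute and maybeUQUIC are the names used in this diff):

    fingerprint, err := ja3.Compute(helloBytes)
    if err == nil && maybeUQUIC(fingerprint) {
        // uQUIC mimicking Chrome 115 is reported like quic-go rather than Chromium
        metadata.Client = C.ClientQUICGo
    }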
@ -5,31 +5,69 @@ import (
    "encoding/hex"
    "testing"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing-box/common/sniff"
    C "github.com/sagernet/sing-box/constant"

    "github.com/stretchr/testify/require"
)

func TestSniffQUICv1(t *testing.T) {
func TestSniffQUICChromium(t *testing.T) {
    t.Parallel()
pkt, err := hex.DecodeString("cc0000000108d2dc7bad02241f5003796e71004215a71bfcb05159416c724be418537389acdd9a4047306283dcb4d7a9cad5cc06322042d204da67a8dbaa328ab476bb428b48fd001501863afd203f8d4ef085629d664f1a734a65969a47e4a63d4e01a21f18c1d90db0c027180906dc135f9ae421bb8617314c8d54c175fef3d3383d310d0916ebcbd6eed9329befbbb109d8fd4af1d2cf9d6adce8e6c1260a7f8256e273e326da0aa7cc148d76e7a08489dc9d52ade89c027cbc3491ada46417c2c04e2ca768e9a7dd6aa00c594e48b678927325da796817693499bb727050cb3baf3d3291a397c3a8d868e8ec7b8f7295e347455c9dadbe2252ae917ac793d958c7fb8a3d2cdb34e3891eb4286f18617556ff7216dd60256aa5b1d11ff4753459fc5f9dedf11d483a26a0835dc6cd50e1c1f54f86e8f1e502821183cd874f6447a74e818bf3445c7795acf4559d1c1fac474911d2ead5c8d23e4aa4f67afb66efe305a30a0b5d825679b31ddc186cbea936535795c7e8c378c87b8c5adc065154d15bae8f85ac8fec2da40c3aa623b682a065440831555011d7647cde44446a0fb4cf5892f2c088ae1920643094be72e3c499fe8d265caf939e8ab607a5b9317917d2a32a812e8a0e6a2f84721bbb5984ffd242838f705d13f4cfb249bc6a5c80d58ac2595edf56648ec3fe21d787573c253a79805252d6d81e26d367d4ff29ef66b5fe8992086af7bada8cad10b82a7c0dc406c5b6d0c5ec3c583e767f759ce08cad6c3c8f91e5a8")
pkt, err := hex.DecodeString("c30000000108f40d654cc09b27f5000044d08a94548e57e43cc5483f129986187c432d58d46674830442988f869566a6e31e2ae37c9f7acbf61cc81621594fab0b3dfdc1635460b32389563dc8e74006315661cd22694114612973c1c45910621713a48b375854f095e8a77ccf3afa64e972f0f7f7002f50e0b014b1b146ea47c07fb20b73ad5587872b51a0b3fafdf1c4cf4fe6f8b112142392efa25d993abe2f42582be145148bdfe12edcd96c3655b65a4781b093e5594ba8e3ae5320f12e8314fc3ca374128cc43381046c322b964681ed4395c813b28534505118201459665a44b8f0abead877de322e9040631d20b05f15b81fa7ff785d4041aecc37c7e2ccdc5d1532787ce566517e8985fd5c200dbfd1e67bc255efaba94cfc07bb52fea4a90887413b134f2715b5643542aa897c6116486f428d82da64d2a2c1e1bdd40bd592558901a554b003d6966ac5a7b8b9413eddbf6ef21f28386c74981e3ce1d724c341e95494907626659692720c81114ca4acea35a14c402cfa3dc2228446e78dc1b81fa4325cf7e314a9cad6a6bdff33b3351dcba74eb15fae67f1227283aa4cdd64bcadf8f19358333f8549b596f4350297b5c65274565869d497398339947b9d3d064e5b06d39d34b436d8a41c1a3880de10bd26c3b1c5b4e2a49b0d4d07b8d90cd9e92bc611564d19ea8ec33099e92033caf21f5307dbeaa4708b99eb313bff99e2081ac25fd12d6a72e8335e0724f6718fe023cd0ad0d6e6a6309f09c9c391eec2bc08e9c3210a043c08e1759f354c121f6517fff4d6e20711a871e41285d48d930352fddffb92c96ba57df045ce99f8bfdfa8edc0969ce68a51e9fbb4f54b956d9df74a9e4af27ed2b27839bce1cffeca8333c0aaee81a570217442f9029ba8fedb84a2cf4be4d910982d891ea00e816c7fb98e8020e896a9c6fdd9106611da0a99dde18df1b7a8f6327acb1eed9ad93314451e48cb0dfb9571728521ca3db2ac0968159d5622556a55d51a422d11995b650949aaefc5d24c16080446dfc4fbc10353f9f93ce161ab513367bb89ab83988e0630b689e174e27bcfcc31996ee7b0bca909e251b82d69a28fee5a5d662e127508cd19dbbe5097b7d5b62a49203d66764197a527e472e2627e44a93d44177dace9d60e7d0e03305ddf4cfe47cdf2362e14de79ef46a6763ce696cd7854a48d9419a0817507a4713ffd4977b906d4f2b5fb6dbe1bd15bc505d5fea582190bf531a45d5ee026da8918547fd5105f15e5d061c7b0cf80a34990366ed8e91e13c2f0d85e5dad537298808d193cf54b7eaac33f10051f74cb6b75e52f81618c36f03d86aef613ba237a1a793ba1539938a38f62ccaf7bd5f6c5e0ce53cde4012fcf2b758214a0422d2faaa798e86e19d7481b42df2b36a73d287ff28c20cce01ce598771fec16a8f1f00305c06010126013a6c1de9f589b4e79d693717cd88ad1c42a2d99fa96617ba0bc6365b68e21a70ebc447904aa27979e1514433cfd83bfec09f137c747d47582cb63eb28f873fb94cf7a59ff764ddfbb687d79a58bb10f85949269f7f72c611a5e0fbb52adfa298ff060ec2eb7216fd7302ea8fb07798cbb3be25cb53ac8161aac2b5bbcfbcfb01c113d28bd1cb0333fb89ac82a95930f7abded0a2f5a623cc6a1f62bf3f38ef1b81c1e50a634f657dbb6770e4af45879e2fb1e00c742e7b52205c8015b5c0f5b1e40186ff9aa7288ab3e01a51fb87761f9bc6837082af109b39cc9f620")
    require.NoError(t, err)
    metadata, err := sniff.QUICClientHello(context.Background(), pkt)
    var metadata adapter.InboundContext
    err = sniff.QUICClientHello(context.Background(), &metadata, pkt)
    require.Equal(t, metadata.Protocol, C.ProtocolQUIC)
    require.Equal(t, metadata.Client, C.ClientChromium)
    require.ErrorIs(t, err, sniff.ErrClientHelloFragmented)
pkt, err = hex.DecodeString("c90000000108f40d654cc09b27f5000044d073eb38807026d4088455e650e7ccf750d01a72f15f9bfc8ff40d223499db1a485cff14dbd45b9be118172834dc35dca3cf62f61a1266f40b92faf3d28d67a466cfdca678ddced15cd606d31959cf441828467857b226d1a241847c82c57312cefe68ba5042d929919bcd4403b39e5699fe87dda05df1b3801e048edee792458e9b1a9b1d4039df05847bcee3be567494b5876e3bd4c3220fe9dfdb2c07d77410f907f744251ef15536cc03b267d3668d5b75bc1ad2fe735cd3bb73519dd9f1625a49e17ad27bdeccf706c83b5ea339a0a05dd0072f4a8f162bd29926b4997f05613c6e4b0270b0c02805ca0543f27c1ff8505a5750bdd33529ee73c491050a10c6903f53c1121dbe0380e84c007c8df74a1b02443ed80ba7766aef5549e618d4fd249844ee28565142005369869299e8c3035ecef3d799f6cada8549e75b4ce4cbf4c85ef071fd7ff067b1ca9b5968dc41d13d011f6d7843823bac97acb1eb8ee45883f0f254b5f9bd4c763b67e2d8c70a7618a0ef0de304cf597a485126e09f8b2fd795b394c0b4bc4cd2634c2057970da2c798c5e8af7aed4f76f5e25d04e3f8c9c5a5b150d17e0d4c74229898c69b8dc7b8bcc9d359eb441de75c68fbdebec62fb669dcccfb1aad03e3fa073adb2ccf7bb14cbaf99e307d2c903ee71a8f028102eb510caee7e7397512086a78d1f95635c7d06845b5a708652dc4e5cd61245aae5b3c05b84815d84d367bce9b9e3f6d6b90701ac3679233c14d5ce2a1eff26469c966266dc6284bdb95c9c6158934c413a872ce22101e4163e3293d236b301592ca4ccacc1fd4c37066e79c2d9857c8a2560dcf0b33b19163c4240c471b19907476e7e25c65f7eb37276594a0f6b4c33c340cc3284178f17ac5e34dbe7509db890e4ddfd0540fbf9deb32a0101d24fe58b26c5f81c627db9d6ae59d7a111a3d5d1f6109f4eec0d0234e6d73c73a44f50999462724b51ce0fd8283535d70d9e83872c79c59897407a0736741011ae5c64862eb0712f9e7b07aa1d5418ca3fde8626257c6fe418f3c5479055bb2b0ab4c25f649923fc2a41c79aaa7d0f3af6d8b8cf06f61f0230d09bbb60bb49b9e49cc5973748a6cf7ffdee7804d424f9423c63e7ff22f4bd24e4867636ef9fe8dd37f59941a8a47c27765caa8e875a30b62834f17c569227e5e6ed15d58e05d36e76332befad065a2cd4079e66d5af189b0337624c89b1560c3b1b0befd5c1f20e6de8e3d664b3ac06b3d154b488983e14aa93266f5f8b621d2a9bb7ccce509eb26e025c9c45f7cccc09ce85b3103af0c93ce9822f82ecb168ca3177829afb2ea0da2c380e7b1728add55a5d42632e2290363d4cbe432b67e13691648e1acfab22cf0d551eee857709b428bb78e27a45aff6eca301c02e4d13cf36cc2494fdd1aef8dede6e18febd79dca4c6964d09b91c25a08f0947c76ab5104de9404459c2edf5f4adb9dfd771be83656f77fbbafb1ad3281717066010be8778952495383c9f2cf0a38527228c662a35171c5981731f1af09bab842fe6c3162ad4152a4221f560eb6f9bea66b294ffbd3643da2fe34096da13c246505452540177a2a0a1a69106e5cfc279a4890fc3be2952f26be245f930e6c2d9e7e26ee960481e72b99594a1185b46b94b6436d00ba6c70ffe135d43907c92c6f1c09fb9453f103730714f5700fa4347f9715c774cb04a7218dacc66d9c2fade18b14e684aa7fc9ebda0a28")
    require.NoError(t, err)
    require.Equal(t, metadata.Domain, "cloudflare-quic.com")
    err = sniff.QUICClientHello(context.Background(), &metadata, pkt)
    require.NoError(t, err)
    require.Equal(t, metadata.Domain, "google.com")
}

func TestSniffQUICFragment(t *testing.T) {
func TestSniffUQUICChrome115(t *testing.T) {
    t.Parallel()
pkt, err := hex.DecodeString("cc00000001082e3d5d1b64040c55000044d0ccea69e773f6631c1d18b04ae9ee75fcfc34ef74fa62533c93534338a86f101a05d70e0697fb483063fa85db1c59ccfbda5c35234931d8524d8aac37eaaad649470a67794cd754b23c98695238b8363452333bc8c4858376b4166e001da2006e35cf98a91e11a56419b2786775284942d0f7163982f7c248867d12dd374957481dbc564013ff785e1916195eef671f725908f761099d992d69231336ba81d9e25fe2fa3a6eff4318a6ccf10176fc841a1b315f7b35c5b292266fc869d76ca533e7d14e86d82db2e22eacd350977e47d2e012d8a5891c5aaf2a0f4c2b2dae897c161e5b68cbb4dee952472bdc1e21504b8f02534ec4366ce3f8bf86efc78e0232778fbd554457567112abdcafcf6d4d8fcf35083c25d9495679614aba21696e338c62b585046cc55ba8c09c844361d889a47c3ea703b4e23545a9ab2c0bb369693a9ddfb5daffa85cf80fdd6ad66738664e5b0a551729b4955cff7255afcb04dee88c2f072c9de7400947a1bd9327ac5d012a33000ada021d4c03d249fb017d6ac9200b2f9436beab8183ddfbe2d8aee31ffb7df9e1cc181c1af80c39a89965d18ed12da8e3ebe2ae1fbe4b348f83ba19e3e3d1c9b22bcf03ab6ad9b30fe180623faa291ebad83bcd71d7b57f2f5e2f3b8e81d24fb70b2f2159239e8f21ffafef2747aba47d97ab4081e603c018b10678cf99cab1fb42156a14486fa435153979d7279fd22cd40af7088bfc7eff41af2f4b3c0c8864d0040d74dff427f7bffdb8c278474ea00311326cf4925471a8cf596cb92119f19e0f789490ba9cb77b98015a987d93e0324cf1a38b55109f00c3e6ddc5180fb107bf468323afec9bb49fd6a86418569789d66cafe3b8253c2aebb3af3782c1c54dd560487d031d28e6a6e23e159581bb1d47efc4da3fe1d169f9ffb0ca9ba61af0a38a92fde5bc5e6ec026e8378a6315a7b95abf1d2da790a391306ce74d0baf8e2ce648ca74c487f2c0a76a28a80cdf5bd34316eb607684fe7e6d9e83824a00e07660d0b90e3cddd61ebf10748263474afa88c300549e64ce2e90560bb1a12dee7e9484f729a8a4ee7c5651adb5194b3b3ae38e501567c7dbf36e7bb37a2c20b74655f47f2d9af18e52e9d4c9c9eee8e63745779b8f0b06f3a09d846ba62eb978ad77c85de1ee2fee3fbb4c2d283c73e1ccba56a4658e48a2665d200f7f9342f8e84c2ba490094a4f94feec89e42d2f654f564c2beb2997bafa1fc2c68ad8e160b63587d49abc31b834878d52acfb05fb73d0e059b206162e3c90b40c4bc08407ffcb3c08431895b691a3fea923f1f3b48db75d3e6b91fd319ffe4d486e0e14bd5c6affc838dee63d9e0b80f169b5e6c02c7321dcb20deb2b8e707b60e345a308d505bbf26a93d8f18b39d62632e9a77cbe48b3b32eb8819d6311a49820d40f5acbf0273c91c36b2269a03e72ee64df3dfb10ddefe73c64ef60870b2b77bd99dea655f5fe791b538a929a14d99f6d69685d72431ea5f0f4b27a044f2f575ab474fcc3857895934de1ca2581798eaef2c17fe5aaf2e6add97fa32997c7026f15c1b1ad0e6043ae506027a7c0242546fdc851cca39a204e56879f2cef838be8ec66e0f2292f8c862e06f810eb9b80c7a467ce6e90155206352c7f82b1173ba3b98d35bb72c259a60db20dd1a43fe6d7aef0265e6eaa5caafd9b64b448ff745a2046acbdb65cf2a5007809808a4828dc99097feedc734c236260c584")
pkt, err := hex.DecodeString("cb0000000108181e17c387120abc000044d0705b6a3ef9ee37a8d3949a7d393ed078243c2ee2c3627fad1c3f107c117f4f071131ad61848068fcbbe5c65803c147f7f8ec5e2cd77b77beea23ba779d936dccac540f8396400e3190ea35cc2942af4171a04cb14272491920f90124959f44e80143678c0b52f5d31af319aaa589db2f940f004562724d0af40f737e1bb0002a071e6a1dbc9f52c64f070806a5010abed0298053634d9c9126bd7949ae5087998ade762c0ad06691d99c0875a38c601fc1ee77bfc3b8c11381829f2c9bdd022f4499c43ff1d6aee1a0d296861461dda217d22c568b276016ef3929e59d2f7d7ddf7809920fb7dc805641608949f3f8466ab3d37149aac501f0b107d808f3add4acfc657e4a82e2b88e97a6c74a00c419548760ab3414ba13915c78a1ca79dceee8d59fbe299f20b671ac44823218368b2a026baa55170cf549519ac21dbb6d31d248bd339438a4e663bcdca1fe3ae3f045a5dc19b122e9db9d7af9757076666dda4e9ace1c67def77fa14786f0cab3ebf7a270ea6e2b37838318c95779f80c3b8471948d0046c3614b3a13477c939a39a7855d85d13522a45ae0765739cd5eedef87237e824a929983ace27640c6495dbf5a72fa0b96893dc5d28f3988249a57bdb458d460b4a57043de3da750a76b6e5d2259247ca27cd864ea18f0d09aa62ab6eb7c014fb43179b2a1963d170b756cce83eeaebff78a828d025c811848e16ff862a8080d093478cd2208c8ab0803178325bc0d9d6bb25e62fa50c4ad15cf80916da6578796932036c72e43eb480d1e423ed812ac75a97722f8416529b82ba8ee2219c535012282bb17066bd53e78b87a71abdb7ebdb2a7c2766ff8397962e87d0f85485b64b4ee81cc84f99c47f33f2b0872716441992773f59186e38d32dbf5609a6fda94cb928cd25f5a7a3ab736b5a4236b6d5409ab18892c6a4d3480fc2350abfdf0bab1cedb55bdf0760fdb703e6688f4de596254eed4ed3e67eb03d0717b8e15b31e735214e588c87ae36bc6c310e1894b4c15143e4ccf287b2dbc707a946bf9671ae3c574f9486b2c82eec784bba4cbc76113cbe0f97ac8c13cfa38f2925ab9d06887a612ce48280a91d7e074e6caf898d88e2bbf71360899abf48a03f9a70cf2891199f2d63b116f4871af0ebb4f4906792f66cc21d1609f189138532875c129a68c73e7bcd3b5d8100beac1d8ac4b20d94a59ac8df5a5af58a9acb20413eadf97189f5f19ff889155f0c4d37514ec184eb6903967ff38a41fc087abb0f2cad3761d6e3f95f92a09a72f5c065b16e188088b87460241f27ecdb1bc6ece92c8d36b2d68b58d0fb4d4b3c928c579ade8ae5a995833aadd297c30a37f7bc35440fc97070e1b198e0fac00157452177d16d2803b4239997452b4ad3a951173bdec47a033fd7f8a7942accaa9aaa905b3c5a2175e7c3e07c48bf25331727fd69cd1e64d74d8c9d4a6f8f4491adb7bc911505cb19877083d8f21a12475e313fccf57877ff3556318e81ed9145dd9427f2b65275440893035f417481f721c69215af8ae103530cd0a1d35bf2cb5a27628f8d44d7c6f5ec12ce79d0a8333e0eb48771115d0a191304e46b8db19bbe5c40f1c346dde98e76ff5e21ff38d2c34e60cb07766ed529dd6d2cbacd7fbf1ed8a0e6e40decad0ca5021e91552be87c156d3ae2fffef41c65b14ba6d488f2c3227a1ab11ffce0e2dc47723a69da27a67a7f26e1cb13a7103af9b87a8db8e18ea")
    require.NoError(t, err)
    metadata, err := sniff.QUICClientHello(context.Background(), pkt)
    var metadata adapter.InboundContext
    err = sniff.QUICClientHello(context.Background(), &metadata, pkt)
    require.NoError(t, err)
    require.Equal(t, metadata.Domain, "cloudflare-quic.com")
    require.Equal(t, metadata.Protocol, C.ProtocolQUIC)
    require.Equal(t, metadata.Client, C.ClientQUICGo)
    require.Equal(t, metadata.Domain, "www.google.com")
}

func TestSniffQUICFirefox(t *testing.T) {
    t.Parallel()
pkt, err := hex.DecodeString("c8000000010867f174d7ebfe1b0803cd9c20004286de068f7963cf1736349ee6ebe0ddcd3e4cd0041a51ced3f7ce9eea1fb595458e74bdb4b792b16449bd8cae71419862c4fcbe766eaec7d1af65cd298e1dd46f8bd94a77ab4ca28c54b8e9773de3f02d7cb2463c9f7dcacfb311f024b0266ec6ab7bfb615b4148333fb4d4ece7c4cd90029ca30c2cbae2216b428499ec873fa125797e71c5a5da85087760ad37ca610020f71b76e82651c47576e20bf33cf676cb2d400b8c09d3c8cb4e21c47d2b21f6b68732bef30c8cefd5c723fc23eb29e6f7f65a5e52aad9055c1fb3d8b1811f0380b38d7e2eee8eb37dd5bd5d4ca4b66540175d916289d88a9df7c161964d713999c5057d27edb298ef5164352568b0d4bac3c15d90456e8fd460e41b81d0ec1b1e94b87d3333cc6908b018e0914ae1f214d73e75398da3d55a0106161d3a75897b4eb66e98c59010fae75f0d367d38be48c3a5c58bc8a30773c3fff50690ac9d487822f85d4f5713d626baa92d36e858dd21259cf814bce0b90d18da88a1ade40113e5a088cdb304a2558879152a8cf15c1839e056378aa41acba6fcb9974dee54bd50b5d4eb2c475654e06c0ec06b7f18f4462c808684843a1071041b9bfb2688324e0120144944416e30e83eedbbbcbc275b1f53762d3db18f0998ce54f0e1c512946b4098f07781d49264fa148f4c8220a3b02e73d7f15554aa370aafeff73cb75c52c494edf90f0261abfdd32a4d670f729de50266162687aa8efe14b8506f313b058b02aaaab5825428f5f4510b8e49451fdcb7b5a4af4b59c831afcb89fb4f64dba78e3b38387e87e9e8cdaa1f3b700a87c7d442388863b8950296e5773b38f308d62f52548c0bbf308e40540747cca5bf99b1345bc0d70b8f0e69a83b85a8d69f795b87f93e2bfccf52b529afea4ff6fd456957000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
    require.NoError(t, err)
    var metadata adapter.InboundContext
    err = sniff.QUICClientHello(context.Background(), &metadata, pkt)
    require.NoError(t, err)
    require.Equal(t, metadata.Protocol, C.ProtocolQUIC)
    require.Equal(t, metadata.Client, C.ClientFirefox)
    require.Equal(t, metadata.Domain, "www.google.com")
}

func TestSniffQUICSafari(t *testing.T) {
    t.Parallel()
pkt, err := hex.DecodeString("c70000000108e4e75af2e223198a0000449ef2d83cb4473a62765eba67424cd4a5817315cbf55a9e8daaca360904b0bae60b1629cfeba11e2dfbbf5ea4c588cb134e31af36fd7a409fb0fcc0187e9b56037ac37964ed20a8c1ca19fd6cfd53398324b3d0c71537294f769db208fa998b6811234a4a7eb3b5eceb457ae92e3a2d98f7c110702db8064b5c29fa3298eb1d0529fd445a84a5fd6ff8709be90f8af4f94998d8a8f2953bb05ad08c80668eca784c6aec959114e68e5b827e7c41c79f2277c716a967e7fcc8d1b77442e6cb18329dbedb34b473516b468cba5fc20659e655fbe37f36408289b9a475fcee091bd82828d3be00367e9e5cec9423bb97854abdada1d7562a3777756eb3bddef826ddc1ef46137cb01bb504a54d410d9bcb74cd5f959050c84edf343fa6a49708c228a758ee7adbbadf260b2f1984911489712e2cb364a3d6520badba4b7e539b9c163eeddfd96c0abb0de151e47496bb9750be76ee17ccdb61d35d2c6795174037d6f9d282c3f36c4d9a90b64f3b6ddd0cf4d9ed8e6f7805e25928fa04b087e63ae02761df30720cc01dfc32b64c575c8a66ef82e9a17400ff80cd8609b93ba16d668f4aa734e71c4a5d145f14ee1151bec970214e0ff83fc3e1e85d8694f2975f9155c57c18b7b69bb6a36832a9435f1f4b346a7be188f3a75f9ad2cc6ad0a3d26d6fa7d4c1179bd49bd5989d15ba43ff602890107db96484695086627356750d7b2b3b714ba65d564654e8f60ac10f5b6d3bfb507e8eaa31bab1da2d676195046d165c7f8b32829c9f9b68d97b2af7ac04a1369357e4b65de2b2f24eaf27cc8d95e05db001adebe726f927a94e43e62ce671e6e306e16f05aafcbe6c49080e80286d7939f375023d110a5ad9069364ae928ca480454a9dcddd61bc48b7efeb716a5bd6c7cd39c486ceb20c738af6abf22ba1ddd8b4a3b781fc2f251173409e1aadccbd7514e97106d0ebfc3af6e59445f74cd733a1ba99b10fce3fb4e9f7c88f5e25b567f5ba2b8dabacd375e7faf7634bfa178cbe51aee63032c5126b196ea47b02385fc3062a000fb7e4b4d0d12e74579f8830ede20d10829496032b2cc56743287f9a9b4d5091877a82fea44deb2cffac8a379f78a151d99e28cbc74d732c083bf06d50584e3f18f254e71a48d6ababaf6fff6f425e9be001510dfbe6a32a27792c00ada036b62ddb90c706d7b882c76a7072f5dd11c69a1f49d4ba183cb0b57545419fa27b9b9706098848935ae9c9e8fbe9fac165d1339128b991a73d20e7795e8d6a8c6adfbf20bf13ada43f2aef3ba78c14697910507132623f721387dce60c4707225b84d9782d469a5d9eaa099f35d6a590ef142ddef766495cf3337815ceef5ff2b3ed352637e72b5c23a2a8ff7d7440236a19b981d47f8e519a0431ebfbc0b78d8a36798b4c060c0c6793499f1e2e818862560a5b501c8d02ba1517be1941da2af5b174e0189c62978d878eb0f9c9db3a9221c28fb94645cf6e85ff2eea8c65ba3083a7382b131b83102dd67aa5453ad7375a4eb8c69fc479fbd29dab8924f801d253f2c997120b705c6e5217fb74702e2f1038917dd5fb0eeb7ae1bf7a668fc7d50c034b4cd5a057a8482e6bc9c921297f44e76967265623a167cd9883eb6e64bc77856dc333bd605d7df3bed0e5cecb5a99fe8b62873d58530f")
    require.NoError(t, err)
    var metadata adapter.InboundContext
    err = sniff.QUICClientHello(context.Background(), &metadata, pkt)
    require.NoError(t, err)
    require.Equal(t, metadata.Protocol, C.ProtocolQUIC)
    require.Equal(t, metadata.Client, C.ClientSafari)
    require.Equal(t, metadata.Domain, "www.google.com")
}

func FuzzSniffQUIC(f *testing.F) {
    f.Fuzz(func(t *testing.T, data []byte) {
        sniff.QUICClientHello(context.Background(), data)
        var metadata adapter.InboundContext
        err := sniff.QUICClientHello(context.Background(), &metadata, data)
        require.Error(t, err)
    })
}

90  common/sniff/rdp.go  (new file)
@ -0,0 +1,90 @@
package sniff

import (
    "context"
    "encoding/binary"
    "io"
    "os"

    "github.com/sagernet/sing-box/adapter"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing/common/rw"
)

func RDP(_ context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
    var tpktVersion uint8
    err := binary.Read(reader, binary.BigEndian, &tpktVersion)
    if err != nil {
        return err
    }
    if tpktVersion != 0x03 {
        return os.ErrInvalid
    }

    var tpktReserved uint8
    err = binary.Read(reader, binary.BigEndian, &tpktReserved)
    if err != nil {
        return err
    }
    if tpktReserved != 0x00 {
        return os.ErrInvalid
    }

    var tpktLength uint16
    err = binary.Read(reader, binary.BigEndian, &tpktLength)
    if err != nil {
        return err
    }

    if tpktLength != 19 {
        return os.ErrInvalid
    }

    var cotpLength uint8
    err = binary.Read(reader, binary.BigEndian, &cotpLength)
    if err != nil {
        return err
    }

    if cotpLength != 14 {
        return os.ErrInvalid
    }

    var cotpTpduType uint8
    err = binary.Read(reader, binary.BigEndian, &cotpTpduType)
    if err != nil {
        return err
    }
    if cotpTpduType != 0xE0 {
        return os.ErrInvalid
    }

    err = rw.SkipN(reader, 5)
    if err != nil {
        return err
    }

    var rdpType uint8
    err = binary.Read(reader, binary.BigEndian, &rdpType)
    if err != nil {
        return err
    }
    if rdpType != 0x01 {
        return os.ErrInvalid
    }
    var rdpFlags uint8
    err = binary.Read(reader, binary.BigEndian, &rdpFlags)
    if err != nil {
        return err
    }
    var rdpLength uint8
    err = binary.Read(reader, binary.BigEndian, &rdpLength)
    if err != nil {
        return err
    }
    if rdpLength != 8 {
        return os.ErrInvalid
    }
    metadata.Protocol = C.ProtocolRDP
    return nil
}

25  common/sniff/rdp_test.go  (new file)
@ -0,0 +1,25 @@
package sniff_test

import (
    "bytes"
    "context"
    "encoding/hex"
    "testing"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing-box/common/sniff"
    C "github.com/sagernet/sing-box/constant"

    "github.com/stretchr/testify/require"
)

func TestSniffRDP(t *testing.T) {
    t.Parallel()

pkt, err := hex.DecodeString("030000130ee00000000000010008000b000000010008000b000000")
    require.NoError(t, err)
    var metadata adapter.InboundContext
    err = sniff.RDP(context.TODO(), &metadata, bytes.NewReader(pkt))
    require.NoError(t, err)
    require.Equal(t, C.ProtocolRDP, metadata.Protocol)
}
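For orientation, a hedged sketch of the byte layout the RDP sniffer above checks: a TPKT header, an X.224 Connection Request, and an RDP negotiation request. The values mirror what the code requires; the trailing bytes are not inspected by the sniffer and are only illustrative.

    hdr := []byte{
        0x03,       // TPKT version, must be 0x03
        0x00,       // TPKT reserved, must be 0x00
        0x00, 0x13, // TPKT length, big-endian, must be 19
        0x0E,       // COTP length, must be 14
        0xE0,       // COTP TPDU type, must be 0xE0 (Connection Request)
        0x00, 0x00, 0x00, 0x00, 0x00, // DST-REF, SRC-REF, class: skipped via rw.SkipN(reader, 5)
        0x01,       // RDP negotiation request type, must be 0x01
        0x00,       // negotiation flags (any value)
        0x08,       // negotiation structure length byte, must be 8
        0x00, 0x00, 0x00, 0x00, 0x00, // remainder, ignored by the sniffer
    }
    var metadata adapter.InboundContext
    err := sniff.RDP(context.TODO(), &metadata, bytes.NewReader(hdr))
    // err == nil and metadata.Protocol == C.ProtocolRDP if the header matches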
@ -14,8 +14,8 @@ import (
)

type (
    StreamSniffer = func(ctx context.Context, reader io.Reader) (*adapter.InboundContext, error)
    StreamSniffer = func(ctx context.Context, metadata *adapter.InboundContext, reader io.Reader) error
    PacketSniffer = func(ctx context.Context, packet []byte) (*adapter.InboundContext, error)
    PacketSniffer = func(ctx context.Context, metadata *adapter.InboundContext, packet []byte) error
)

func Skip(metadata adapter.InboundContext) bool {
@ -34,7 +34,7 @@ func Skip(metadata adapter.InboundContext) bool {
    return false
}

func PeekStream(ctx context.Context, conn net.Conn, buffer *buf.Buffer, timeout time.Duration, sniffers ...StreamSniffer) (*adapter.InboundContext, error) {
func PeekStream(ctx context.Context, metadata *adapter.InboundContext, conn net.Conn, buffer *buf.Buffer, timeout time.Duration, sniffers ...StreamSniffer) error {
    if timeout == 0 {
        timeout = C.ReadPayloadTimeout
    }
@ -42,32 +42,31 @@ func PeekStream(ctx context.Context, conn net.Conn, buffer *buf.Buffer, timeout
    var errors []error
    err := conn.SetReadDeadline(deadline)
    if err != nil {
        return nil, E.Cause(err, "set read deadline")
        return E.Cause(err, "set read deadline")
    }
    defer conn.SetReadDeadline(time.Time{})
    var metadata *adapter.InboundContext
    for _, sniffer := range sniffers {
        if buffer.IsEmpty() {
            metadata, err = sniffer(ctx, io.TeeReader(conn, buffer))
            err = sniffer(ctx, metadata, io.TeeReader(conn, buffer))
        } else {
            metadata, err = sniffer(ctx, io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(conn, buffer)))
            err = sniffer(ctx, metadata, io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(conn, buffer)))
        }
        if metadata != nil {
        if err == nil {
            return metadata, nil
            return nil
        }
        errors = append(errors, err)
    }
    return nil, E.Errors(errors...)
    return E.Errors(errors...)
}

func PeekPacket(ctx context.Context, packet []byte, sniffers ...PacketSniffer) (*adapter.InboundContext, error) {
func PeekPacket(ctx context.Context, metadata *adapter.InboundContext, packet []byte, sniffers ...PacketSniffer) error {
    var errors []error
    for _, sniffer := range sniffers {
        metadata, err := sniffer(ctx, packet)
        err := sniffer(ctx, metadata, packet)
        if metadata != nil {
        if err == nil {
            return metadata, nil
            return nil
        }
        errors = append(errors, err)
    }
    return nil, E.Errors(errors...)
    return E.Errors(errors...)
}
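The sniffer callbacks now fill a caller-owned *adapter.InboundContext and return only an error. A hedged usage sketch of the new calling convention, limited to the sniffers shown in this diff (ctx, conn, buffer and timeout are assumed to come from the caller's connection handling):

    var metadata adapter.InboundContext
    err := sniff.PeekStream(ctx, &metadata, conn, buffer, timeout,
        sniff.TLSClientHello, sniff.SSH, sniff.RDP)
    if err == nil {
        // metadata.Protocol (and Domain/Client where applicable) are populated in place
    }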
26  common/sniff/ssh.go  (new file)
@ -0,0 +1,26 @@
package sniff

import (
    "bufio"
    "context"
    "io"
    "os"
    "strings"

    "github.com/sagernet/sing-box/adapter"
    C "github.com/sagernet/sing-box/constant"
)

func SSH(_ context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
    scanner := bufio.NewScanner(reader)
    if !scanner.Scan() {
        return os.ErrInvalid
    }
    fistLine := scanner.Text()
    if !strings.HasPrefix(fistLine, "SSH-2.0-") {
        return os.ErrInvalid
    }
    metadata.Protocol = C.ProtocolSSH
    metadata.Client = fistLine[8:]
    return nil
}

26  common/sniff/ssh_test.go  (new file)
@ -0,0 +1,26 @@
package sniff_test

import (
    "bytes"
    "context"
    "encoding/hex"
    "testing"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing-box/common/sniff"
    C "github.com/sagernet/sing-box/constant"

    "github.com/stretchr/testify/require"
)

func TestSniffSSH(t *testing.T) {
    t.Parallel()

pkt, err := hex.DecodeString("5353482d322e302d64726f70626561720d0a000001a40a1492892570d1223aef61b0d647972c8bd30000009f637572766532353531392d7368613235362c637572766532353531392d736861323536406c69627373682e6f72672c6469666669652d68656c6c6d616e2d67726f757031342d7368613235362c6469666669652d68656c6c6d616e2d67726f757031342d736861312c6b6578677565737332406d6174742e7563632e61736e2e61752c6b65782d7374726963742d732d763030406f70656e7373682e636f6d000000207373682d656432353531392c7273612d736861322d3235362c7373682d7273610000003363686163686132302d706f6c7931333035406f70656e7373682e636f6d2c6165733132382d6374722c6165733235362d6374720000003363686163686132302d706f6c7931333035406f70656e7373682e636f6d2c6165733132382d6374722c6165733235362d63747200000017686d61632d736861312c686d61632d736861322d32353600000017686d61632d736861312c686d61632d736861322d323536000000046e6f6e65000000046e6f6e65000000000000000000000000002aa6ed090585b7d635b6")
    require.NoError(t, err)
    var metadata adapter.InboundContext
    err = sniff.SSH(context.TODO(), &metadata, bytes.NewReader(pkt))
    require.NoError(t, err)
    require.Equal(t, C.ProtocolSSH, metadata.Protocol)
    require.Equal(t, "dropbear", metadata.Client)
}
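The SSH sniffer only needs the identification string: it requires the "SSH-2.0-" prefix and records the remainder as the client software. A hedged sketch using only names from this diff:

    var metadata adapter.InboundContext
    err := sniff.SSH(context.TODO(), &metadata, strings.NewReader("SSH-2.0-OpenSSH_9.6\r\n"))
    // err == nil, metadata.Protocol == C.ProtocolSSH, metadata.Client == "OpenSSH_9.6"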
@ -9,16 +9,17 @@ import (
    C "github.com/sagernet/sing-box/constant"
)

func STUNMessage(ctx context.Context, packet []byte) (*adapter.InboundContext, error) {
func STUNMessage(_ context.Context, metadata *adapter.InboundContext, packet []byte) error {
    pLen := len(packet)
    if pLen < 20 {
        return nil, os.ErrInvalid
        return os.ErrInvalid
    }
    if binary.BigEndian.Uint32(packet[4:8]) != 0x2112A442 {
        return nil, os.ErrInvalid
        return os.ErrInvalid
    }
    if len(packet) < 20+int(binary.BigEndian.Uint16(packet[2:4])) {
        return nil, os.ErrInvalid
        return os.ErrInvalid
    }
    return &adapter.InboundContext{Protocol: C.ProtocolSTUN}, nil
    metadata.Protocol = C.ProtocolSTUN
    return nil
}

@ -5,6 +5,7 @@ import (
    "encoding/hex"
    "testing"

    "github.com/sagernet/sing-box/adapter"
    "github.com/sagernet/sing-box/common/sniff"
    C "github.com/sagernet/sing-box/constant"

@ -15,14 +16,16 @@ func TestSniffSTUN(t *testing.T) {
    t.Parallel()
    packet, err := hex.DecodeString("000100002112a44224b1a025d0c180c484341306")
    require.NoError(t, err)
    metadata, err := sniff.STUNMessage(context.Background(), packet)
    var metadata adapter.InboundContext
    err = sniff.STUNMessage(context.Background(), &metadata, packet)
    require.NoError(t, err)
    require.Equal(t, metadata.Protocol, C.ProtocolSTUN)
}

func FuzzSniffSTUN(f *testing.F) {
    f.Fuzz(func(t *testing.T, data []byte) {
        if _, err := sniff.STUNMessage(context.Background(), data); err == nil {
        var metadata adapter.InboundContext
        if err := sniff.STUNMessage(context.Background(), &metadata, data); err == nil {
            t.Fail()
        }
    })
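For reference, a hedged sketch of the 20-byte STUN header the checks above expect: message type, attribute length, the 0x2112A442 magic cookie, and a transaction ID.

    packet := make([]byte, 20)
    binary.BigEndian.PutUint16(packet[0:2], 0x0001)     // message type: Binding Request
    binary.BigEndian.PutUint16(packet[2:4], 0)          // attribute length, must fit the packet
    binary.BigEndian.PutUint32(packet[4:8], 0x2112A442) // magic cookie checked by the sniffer
    // packet[8:20]: transaction ID (any value)
    var metadata adapter.InboundContext
    err := sniff.STUNMessage(context.Background(), &metadata, packet)
    // err == nil and metadata.Protocol == C.ProtocolSTUN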
@ -10,7 +10,7 @@ import (
    "github.com/sagernet/sing/common/bufio"
)

func TLSClientHello(ctx context.Context, reader io.Reader) (*adapter.InboundContext, error) {
func TLSClientHello(ctx context.Context, metadata *adapter.InboundContext, reader io.Reader) error {
    var clientHello *tls.ClientHelloInfo
    err := tls.Server(bufio.NewReadOnlyConn(reader), &tls.Config{
        GetConfigForClient: func(argHello *tls.ClientHelloInfo) (*tls.Config, error) {
@ -19,7 +19,9 @@ func TLSClientHello(ctx context.Context, reader io.Reader) (*adapter.InboundCont
        },
    }).HandshakeContext(ctx)
    if clientHello != nil {
        return &adapter.InboundContext{Protocol: C.ProtocolTLS, Domain: clientHello.ServerName}, nil
        metadata.Protocol = C.ProtocolTLS
        metadata.Domain = clientHello.ServerName
        return nil
    }
    return nil, err
    return err
}
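A brief usage sketch, with the same hedging as above: TLSClientHello only needs a reader over the captured ClientHello bytes (clientHelloBytes is hypothetical), since the aborted server handshake is used purely to extract the SNI.

    var metadata adapter.InboundContext
    err := sniff.TLSClientHello(context.Background(), &metadata, bytes.NewReader(clientHelloBytes))
    if err == nil {
        // metadata.Protocol == C.ProtocolTLS, metadata.Domain holds the SNI
    }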
@ -1,6 +1,7 @@
package srs

import (
    "bufio"
    "compress/zlib"
    "encoding/binary"
    "io"
@ -11,7 +12,7 @@ import (
    "github.com/sagernet/sing/common"
    "github.com/sagernet/sing/common/domain"
    E "github.com/sagernet/sing/common/exceptions"
    "github.com/sagernet/sing/common/rw"
    "github.com/sagernet/sing/common/varbin"

    "go4.org/netipx"
)
@ -35,17 +36,18 @@ const (
    ruleItemPackageName
    ruleItemWIFISSID
    ruleItemWIFIBSSID
    ruleItemAdGuardDomain
    ruleItemFinal uint8 = 0xFF
)

func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err error) {
func Read(reader io.Reader, recover bool) (ruleSet option.PlainRuleSet, err error) {
    var magicBytes [3]byte
    _, err = io.ReadFull(reader, magicBytes[:])
    if err != nil {
        return
    }
    if magicBytes != MagicBytes {
        err = E.New("invalid sing-box rule set file")
        err = E.New("invalid sing-box rule-set file")
        return
    }
    var version uint8
@ -53,20 +55,21 @@ func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err err
    if err != nil {
        return ruleSet, err
    }
    if version != 1 {
    if version > C.RuleSetVersion2 {
        return ruleSet, E.New("unsupported version: ", version)
    }
    zReader, err := zlib.NewReader(reader)
    compressReader, err := zlib.NewReader(reader)
    if err != nil {
        return
    }
    length, err := rw.ReadUVariant(zReader)
    bReader := bufio.NewReader(compressReader)
    length, err := binary.ReadUvarint(bReader)
    if err != nil {
        return
    }
    ruleSet.Rules = make([]option.HeadlessRule, length)
    for i := uint64(0); i < length; i++ {
        ruleSet.Rules[i], err = readRule(zReader, recovery)
        ruleSet.Rules[i], err = readRule(bReader, recover)
        if err != nil {
            err = E.Cause(err, "read rule[", i, "]")
            return
@ -75,33 +78,44 @@ func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err err
    return
}

func Write(writer io.Writer, ruleSet option.PlainRuleSet) error {
func Write(writer io.Writer, ruleSet option.PlainRuleSet, generateUnstable bool) error {
    _, err := writer.Write(MagicBytes[:])
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, uint8(1))
    var version uint8
    if generateUnstable {
        version = C.RuleSetVersion2
    } else {
        version = C.RuleSetVersion1
    }
    err = binary.Write(writer, binary.BigEndian, version)
    if err != nil {
        return err
    }
    zWriter, err := zlib.NewWriterLevel(writer, zlib.BestCompression)
    compressWriter, err := zlib.NewWriterLevel(writer, zlib.BestCompression)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(zWriter, uint64(len(ruleSet.Rules)))
    bWriter := bufio.NewWriter(compressWriter)
    _, err = varbin.WriteUvarint(bWriter, uint64(len(ruleSet.Rules)))
    if err != nil {
        return err
    }
    for _, rule := range ruleSet.Rules {
        err = writeRule(zWriter, rule)
        err = writeRule(bWriter, rule, generateUnstable)
        if err != nil {
            return err
        }
    }
    return zWriter.Close()
    err = bWriter.Flush()
    if err != nil {
        return err
    }
    return compressWriter.Close()
}

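To summarize the on-disk layout implemented above: three magic bytes, a version byte (version 2 only when unstable items such as AdGuard domain matchers are requested), then a zlib stream containing a uvarint rule count followed by the serialized rules. A hedged round-trip sketch using only the srs.Read and srs.Write signatures from this diff (ruleSet contents are assumed):

    var buffer bytes.Buffer
    if err := srs.Write(&buffer, ruleSet, false); err != nil { // false: emit rule-set version 1
        panic(err)
    }
    decoded, err := srs.Read(bytes.NewReader(buffer.Bytes()), true) // true: decompile matchers back to options
    if err != nil {
        panic(err)
    }
    _ = decoded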
func readRule(reader io.Reader, recovery bool) (rule option.HeadlessRule, err error) {
func readRule(reader varbin.Reader, recover bool) (rule option.HeadlessRule, err error) {
    var ruleType uint8
    err = binary.Read(reader, binary.BigEndian, &ruleType)
    if err != nil {
@ -110,28 +124,28 @@ func readRule(reader io.Reader, recovery bool) (rule option.HeadlessRule, err er
    switch ruleType {
    case 0:
        rule.Type = C.RuleTypeDefault
        rule.DefaultOptions, err = readDefaultRule(reader, recovery)
        rule.DefaultOptions, err = readDefaultRule(reader, recover)
    case 1:
        rule.Type = C.RuleTypeLogical
        rule.LogicalOptions, err = readLogicalRule(reader, recovery)
        rule.LogicalOptions, err = readLogicalRule(reader, recover)
    default:
        err = E.New("unknown rule type: ", ruleType)
    }
    return
}

func writeRule(writer io.Writer, rule option.HeadlessRule) error {
func writeRule(writer varbin.Writer, rule option.HeadlessRule, generateUnstable bool) error {
    switch rule.Type {
    case C.RuleTypeDefault:
        return writeDefaultRule(writer, rule.DefaultOptions)
        return writeDefaultRule(writer, rule.DefaultOptions, generateUnstable)
    case C.RuleTypeLogical:
        return writeLogicalRule(writer, rule.LogicalOptions)
        return writeLogicalRule(writer, rule.LogicalOptions, generateUnstable)
    default:
        panic("unknown rule type: " + rule.Type)
    }
}

func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadlessRule, err error) {
func readDefaultRule(reader varbin.Reader, recover bool) (rule option.DefaultHeadlessRule, err error) {
    var lastItemType uint8
    for {
        var itemType uint8
@ -158,6 +172,9 @@ func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadle
            return
        }
        rule.DomainMatcher = matcher
        if recover {
            rule.Domain, rule.DomainSuffix = matcher.Dump()
        }
    case ruleItemDomainKeyword:
        rule.DomainKeyword, err = readRuleItemString(reader)
    case ruleItemDomainRegex:
@ -167,7 +184,7 @@ func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadle
        if err != nil {
            return
        }
        if recovery {
        if recover {
            rule.SourceIPCIDR = common.Map(rule.SourceIPSet.Prefixes(), netip.Prefix.String)
        }
    case ruleItemIPCIDR:
@ -175,7 +192,7 @@ func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadle
        if err != nil {
            return
        }
        if recovery {
        if recover {
            rule.IPCIDR = common.Map(rule.IPSet.Prefixes(), netip.Prefix.String)
        }
    case ruleItemSourcePort:
@ -196,6 +213,17 @@ func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadle
        rule.WIFISSID, err = readRuleItemString(reader)
    case ruleItemWIFIBSSID:
        rule.WIFIBSSID, err = readRuleItemString(reader)
    case ruleItemAdGuardDomain:
        if recover {
            err = E.New("unable to decompile binary AdGuard rules to rule-set")
            return
        }
        var matcher *domain.AdGuardMatcher
        matcher, err = domain.ReadAdGuardMatcher(reader)
        if err != nil {
            return
        }
        rule.AdGuardDomainMatcher = matcher
    case ruleItemFinal:
        err = binary.Read(reader, binary.BigEndian, &rule.Invert)
        return
@ -209,7 +237,7 @@ func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadle
    }
}

func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
func writeDefaultRule(writer varbin.Writer, rule option.DefaultHeadlessRule, generateUnstable bool) error {
    err := binary.Write(writer, binary.BigEndian, uint8(0))
    if err != nil {
        return err
@ -233,7 +261,7 @@ func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
    if err != nil {
        return err
    }
    err = domain.NewMatcher(rule.Domain, rule.DomainSuffix).Write(writer)
    err = domain.NewMatcher(rule.Domain, rule.DomainSuffix, !generateUnstable).Write(writer)
    if err != nil {
        return err
    }
@ -316,6 +344,16 @@ func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
        return err
    }
    }
    if len(rule.AdGuardDomain) > 0 {
        err = binary.Write(writer, binary.BigEndian, ruleItemAdGuardDomain)
        if err != nil {
            return err
        }
        err = domain.NewAdGuardMatcher(rule.AdGuardDomain).Write(writer)
        if err != nil {
            return err
        }
    }
    err = binary.Write(writer, binary.BigEndian, ruleItemFinal)
    if err != nil {
        return err
@ -327,73 +365,31 @@ func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
    return nil
}

func readRuleItemString(reader io.Reader) ([]string, error) {
func readRuleItemString(reader varbin.Reader) ([]string, error) {
    length, err := rw.ReadUVariant(reader)
    return varbin.ReadValue[[]string](reader, binary.BigEndian)
    if err != nil {
        return nil, err
    }
    value := make([]string, length)
    for i := uint64(0); i < length; i++ {
        value[i], err = rw.ReadVString(reader)
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemString(writer io.Writer, itemType uint8, value []string) error {
func writeRuleItemString(writer varbin.Writer, itemType uint8, value []string) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    err := writer.WriteByte(itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    return varbin.Write(writer, binary.BigEndian, value)
}

func readRuleItemUint16(reader varbin.Reader) ([]uint16, error) {
    return varbin.ReadValue[[]uint16](reader, binary.BigEndian)
}

func writeRuleItemUint16(writer varbin.Writer, itemType uint8, value []uint16) error {
    err := writer.WriteByte(itemType)
    if err != nil {
        return err
    }
    for _, item := range value {
    return varbin.Write(writer, binary.BigEndian, value)
        err = rw.WriteVString(writer, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func readRuleItemUint16(reader io.Reader) ([]uint16, error) {
func writeRuleItemCIDR(writer varbin.Writer, itemType uint8, value []string) error {
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return nil, err
    }
    value := make([]uint16, length)
    for i := uint64(0); i < length; i++ {
        err = binary.Read(reader, binary.BigEndian, &value[i])
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemUint16(writer io.Writer, itemType uint8, value []uint16) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    if err != nil {
        return err
    }
    for _, item := range value {
        err = binary.Write(writer, binary.BigEndian, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func writeRuleItemCIDR(writer io.Writer, itemType uint8, value []string) error {
    var builder netipx.IPSetBuilder
    for i, prefixString := range value {
        prefix, err := netip.ParsePrefix(prefixString)
@ -419,9 +415,8 @@ func writeRuleItemCIDR(writer io.Writer, itemType uint8, value []string) error {
    return writeIPSet(writer, ipSet)
}

func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.LogicalHeadlessRule, err error) {
|
func readLogicalRule(reader varbin.Reader, recovery bool) (logicalRule option.LogicalHeadlessRule, err error) {
|
||||||
var mode uint8
|
mode, err := reader.ReadByte()
|
||||||
err = binary.Read(reader, binary.BigEndian, &mode)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -434,7 +429,7 @@ func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.Logica
|
|||||||
err = E.New("unknown logical mode: ", mode)
|
err = E.New("unknown logical mode: ", mode)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
length, err := rw.ReadUVariant(reader)
|
length, err := binary.ReadUvarint(reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -453,7 +448,7 @@ func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.Logica
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeLogicalRule(writer io.Writer, logicalRule option.LogicalHeadlessRule) error {
|
func writeLogicalRule(writer varbin.Writer, logicalRule option.LogicalHeadlessRule, generateUnstable bool) error {
|
||||||
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -469,12 +464,12 @@ func writeLogicalRule(writer io.Writer, logicalRule option.LogicalHeadlessRule)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = rw.WriteUVariant(writer, uint64(len(logicalRule.Rules)))
|
_, err = varbin.WriteUvarint(writer, uint64(len(logicalRule.Rules)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, rule := range logicalRule.Rules {
|
for _, rule := range logicalRule.Rules {
|
||||||
err = writeRule(writer, rule)
|
err = writeRule(writer, rule, generateUnstable)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -2,11 +2,13 @@ package srs
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"io"
|
|
||||||
"net/netip"
|
"net/netip"
|
||||||
|
"os"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/sagernet/sing/common/rw"
|
"github.com/sagernet/sing/common"
|
||||||
|
M "github.com/sagernet/sing/common/metadata"
|
||||||
|
"github.com/sagernet/sing/common/varbin"
|
||||||
|
|
||||||
"go4.org/netipx"
|
"go4.org/netipx"
|
||||||
)
|
)
|
||||||
@ -20,94 +22,57 @@ type myIPRange struct {
|
|||||||
to netip.Addr
|
to netip.Addr
|
||||||
}
|
}
|
||||||
|
|
||||||
func readIPSet(reader io.Reader) (*netipx.IPSet, error) {
|
type myIPRangeData struct {
|
||||||
var version uint8
|
From []byte
|
||||||
err := binary.Read(reader, binary.BigEndian, &version)
|
To []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func readIPSet(reader varbin.Reader) (*netipx.IPSet, error) {
|
||||||
|
version, err := reader.ReadByte()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if version != 1 {
|
||||||
|
return nil, os.ErrInvalid
|
||||||
|
}
|
||||||
|
// WTF why using uint64 here
|
||||||
var length uint64
|
var length uint64
|
||||||
err = binary.Read(reader, binary.BigEndian, &length)
|
err = binary.Read(reader, binary.BigEndian, &length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
mySet := &myIPSet{
|
ranges := make([]myIPRangeData, length)
|
||||||
rr: make([]myIPRange, length),
|
err = varbin.Read(reader, binary.BigEndian, &ranges)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
for i := uint64(0); i < length; i++ {
|
mySet := &myIPSet{
|
||||||
var (
|
rr: make([]myIPRange, len(ranges)),
|
||||||
fromLen uint64
|
}
|
||||||
toLen uint64
|
for i, rangeData := range ranges {
|
||||||
fromAddr netip.Addr
|
mySet.rr[i].from = M.AddrFromIP(rangeData.From)
|
||||||
toAddr netip.Addr
|
mySet.rr[i].to = M.AddrFromIP(rangeData.To)
|
||||||
)
|
|
||||||
fromLen, err = rw.ReadUVariant(reader)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
fromBytes := make([]byte, fromLen)
|
|
||||||
_, err = io.ReadFull(reader, fromBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = fromAddr.UnmarshalBinary(fromBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
toLen, err = rw.ReadUVariant(reader)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
toBytes := make([]byte, toLen)
|
|
||||||
_, err = io.ReadFull(reader, toBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = toAddr.UnmarshalBinary(toBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mySet.rr[i] = myIPRange{fromAddr, toAddr}
|
|
||||||
}
|
}
|
||||||
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
|
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeIPSet(writer io.Writer, set *netipx.IPSet) error {
|
func writeIPSet(writer varbin.Writer, set *netipx.IPSet) error {
|
||||||
err := binary.Write(writer, binary.BigEndian, uint8(1))
|
err := writer.WriteByte(1)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
mySet := (*myIPSet)(unsafe.Pointer(set))
|
dataList := common.Map((*myIPSet)(unsafe.Pointer(set)).rr, func(rr myIPRange) myIPRangeData {
|
||||||
err = binary.Write(writer, binary.BigEndian, uint64(len(mySet.rr)))
|
return myIPRangeData{
|
||||||
|
From: rr.from.AsSlice(),
|
||||||
|
To: rr.to.AsSlice(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
err = binary.Write(writer, binary.BigEndian, uint64(len(dataList)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, rr := range mySet.rr {
|
for _, data := range dataList {
|
||||||
var (
|
err = varbin.Write(writer, binary.BigEndian, data)
|
||||||
fromBinary []byte
|
|
||||||
toBinary []byte
|
|
||||||
)
|
|
||||||
fromBinary, err = rr.from.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = rw.WriteUVariant(writer, uint64(len(fromBinary)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = writer.Write(fromBinary)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
toBinary, err = rr.to.MarshalBinary()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = rw.WriteUVariant(writer, uint64(len(toBinary)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = writer.Write(toBinary)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -11,12 +11,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
cftls "github.com/sagernet/cloudflare-tls"
|
cftls "github.com/sagernet/cloudflare-tls"
|
||||||
|
"github.com/sagernet/fswatch"
|
||||||
"github.com/sagernet/sing-box/log"
|
"github.com/sagernet/sing-box/log"
|
||||||
"github.com/sagernet/sing-box/option"
|
"github.com/sagernet/sing-box/option"
|
||||||
E "github.com/sagernet/sing/common/exceptions"
|
E "github.com/sagernet/sing/common/exceptions"
|
||||||
"github.com/sagernet/sing/common/ntp"
|
"github.com/sagernet/sing/common/ntp"
|
||||||
|
|
||||||
"github.com/fsnotify/fsnotify"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type echServerConfig struct {
|
type echServerConfig struct {
|
||||||
@ -26,9 +25,8 @@ type echServerConfig struct {
|
|||||||
key []byte
|
key []byte
|
||||||
certificatePath string
|
certificatePath string
|
||||||
keyPath string
|
keyPath string
|
||||||
watcher *fsnotify.Watcher
|
|
||||||
echKeyPath string
|
echKeyPath string
|
||||||
echWatcher *fsnotify.Watcher
|
watcher *fswatch.Watcher
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *echServerConfig) ServerName() string {
|
func (c *echServerConfig) ServerName() string {
|
||||||
@ -66,146 +64,84 @@ func (c *echServerConfig) Clone() Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *echServerConfig) Start() error {
|
func (c *echServerConfig) Start() error {
|
||||||
if c.certificatePath != "" && c.keyPath != "" {
|
err := c.startWatcher()
|
||||||
err := c.startWatcher()
|
if err != nil {
|
||||||
if err != nil {
|
c.logger.Warn("create credentials watcher: ", err)
|
||||||
c.logger.Warn("create fsnotify watcher: ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.echKeyPath != "" {
|
|
||||||
err := c.startECHWatcher()
|
|
||||||
if err != nil {
|
|
||||||
c.logger.Warn("create fsnotify watcher: ", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *echServerConfig) startWatcher() error {
|
func (c *echServerConfig) startWatcher() error {
|
||||||
watcher, err := fsnotify.NewWatcher()
|
var watchPath []string
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if c.certificatePath != "" {
|
if c.certificatePath != "" {
|
||||||
err = watcher.Add(c.certificatePath)
|
watchPath = append(watchPath, c.certificatePath)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if c.keyPath != "" {
|
if c.keyPath != "" {
|
||||||
err = watcher.Add(c.keyPath)
|
watchPath = append(watchPath, c.keyPath)
|
||||||
if err != nil {
|
}
|
||||||
return err
|
if c.echKeyPath != "" {
|
||||||
}
|
watchPath = append(watchPath, c.echKeyPath)
|
||||||
|
}
|
||||||
|
if len(watchPath) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
watcher, err := fswatch.NewWatcher(fswatch.Options{
|
||||||
|
Path: watchPath,
|
||||||
|
Callback: func(path string) {
|
||||||
|
err := c.credentialsUpdated(path)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Error(E.Cause(err, "reload credentials from ", path))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
c.watcher = watcher
|
c.watcher = watcher
|
||||||
go c.loopUpdate()
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *echServerConfig) loopUpdate() {
|
func (c *echServerConfig) credentialsUpdated(path string) error {
|
||||||
for {
|
if path == c.certificatePath || path == c.keyPath {
|
||||||
select {
|
if path == c.certificatePath {
|
||||||
case event, ok := <-c.watcher.Events:
|
certificate, err := os.ReadFile(c.certificatePath)
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if event.Op&fsnotify.Write != fsnotify.Write {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err := c.reloadKeyPair()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.logger.Error(E.Cause(err, "reload TLS key pair"))
|
return err
|
||||||
}
|
}
|
||||||
case err, ok := <-c.watcher.Errors:
|
c.certificate = certificate
|
||||||
if !ok {
|
} else {
|
||||||
return
|
key, err := os.ReadFile(c.keyPath)
|
||||||
}
|
|
||||||
c.logger.Error(E.Cause(err, "fsnotify error"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *echServerConfig) reloadKeyPair() error {
|
|
||||||
if c.certificatePath != "" {
|
|
||||||
certificate, err := os.ReadFile(c.certificatePath)
|
|
||||||
if err != nil {
|
|
||||||
return E.Cause(err, "reload certificate from ", c.certificatePath)
|
|
||||||
}
|
|
||||||
c.certificate = certificate
|
|
||||||
}
|
|
||||||
if c.keyPath != "" {
|
|
||||||
key, err := os.ReadFile(c.keyPath)
|
|
||||||
if err != nil {
|
|
||||||
return E.Cause(err, "reload key from ", c.keyPath)
|
|
||||||
}
|
|
||||||
c.key = key
|
|
||||||
}
|
|
||||||
keyPair, err := cftls.X509KeyPair(c.certificate, c.key)
|
|
||||||
if err != nil {
|
|
||||||
return E.Cause(err, "reload key pair")
|
|
||||||
}
|
|
||||||
c.config.Certificates = []cftls.Certificate{keyPair}
|
|
||||||
c.logger.Info("reloaded TLS certificate")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *echServerConfig) startECHWatcher() error {
|
|
||||||
watcher, err := fsnotify.NewWatcher()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = watcher.Add(c.echKeyPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.echWatcher = watcher
|
|
||||||
go c.loopECHUpdate()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *echServerConfig) loopECHUpdate() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case event, ok := <-c.echWatcher.Events:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if event.Op&fsnotify.Write != fsnotify.Write {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err := c.reloadECHKey()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.logger.Error(E.Cause(err, "reload ECH key"))
|
return err
|
||||||
}
|
}
|
||||||
case err, ok := <-c.echWatcher.Errors:
|
c.key = key
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.logger.Error(E.Cause(err, "fsnotify error"))
|
|
||||||
}
|
}
|
||||||
|
keyPair, err := cftls.X509KeyPair(c.certificate, c.key)
|
||||||
|
if err != nil {
|
||||||
|
return E.Cause(err, "parse key pair")
|
||||||
|
}
|
||||||
|
c.config.Certificates = []cftls.Certificate{keyPair}
|
||||||
|
c.logger.Info("reloaded TLS certificate")
|
||||||
|
} else {
|
||||||
|
echKeyContent, err := os.ReadFile(c.echKeyPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
block, rest := pem.Decode(echKeyContent)
|
||||||
|
if block == nil || block.Type != "ECH KEYS" || len(rest) > 0 {
|
||||||
|
return E.New("invalid ECH keys pem")
|
||||||
|
}
|
||||||
|
echKeys, err := cftls.EXP_UnmarshalECHKeys(block.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return E.Cause(err, "parse ECH keys")
|
||||||
|
}
|
||||||
|
echKeySet, err := cftls.EXP_NewECHKeySet(echKeys)
|
||||||
|
if err != nil {
|
||||||
|
return E.Cause(err, "create ECH key set")
|
||||||
|
}
|
||||||
|
c.config.ServerECHProvider = echKeySet
|
||||||
|
c.logger.Info("reloaded ECH keys")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (c *echServerConfig) reloadECHKey() error {
|
|
||||||
echKeyContent, err := os.ReadFile(c.echKeyPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
block, rest := pem.Decode(echKeyContent)
|
|
||||||
if block == nil || block.Type != "ECH KEYS" || len(rest) > 0 {
|
|
||||||
return E.New("invalid ECH keys pem")
|
|
||||||
}
|
|
||||||
echKeys, err := cftls.EXP_UnmarshalECHKeys(block.Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return E.Cause(err, "parse ECH keys")
|
|
||||||
}
|
|
||||||
echKeySet, err := cftls.EXP_NewECHKeySet(echKeys)
|
|
||||||
if err != nil {
|
|
||||||
return E.Cause(err, "create ECH key set")
|
|
||||||
}
|
|
||||||
c.config.ServerECHProvider = echKeySet
|
|
||||||
c.logger.Info("reloaded ECH keys")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -213,12 +149,7 @@ func (c *echServerConfig) Close() error {
|
|||||||
var err error
|
var err error
|
||||||
if c.watcher != nil {
|
if c.watcher != nil {
|
||||||
err = E.Append(err, c.watcher.Close(), func(err error) error {
|
err = E.Append(err, c.watcher.Close(), func(err error) error {
|
||||||
return E.Cause(err, "close certificate watcher")
|
return E.Cause(err, "close credentials watcher")
|
||||||
})
|
|
||||||
}
|
|
||||||
if c.echWatcher != nil {
|
|
||||||
err = E.Append(err, c.echWatcher.Close(), func(err error) error {
|
|
||||||
return E.Cause(err, "close ECH key watcher")
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -7,14 +7,13 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/sagernet/fswatch"
|
||||||
"github.com/sagernet/sing-box/adapter"
|
"github.com/sagernet/sing-box/adapter"
|
||||||
"github.com/sagernet/sing-box/log"
|
"github.com/sagernet/sing-box/log"
|
||||||
"github.com/sagernet/sing-box/option"
|
"github.com/sagernet/sing-box/option"
|
||||||
"github.com/sagernet/sing/common"
|
"github.com/sagernet/sing/common"
|
||||||
E "github.com/sagernet/sing/common/exceptions"
|
E "github.com/sagernet/sing/common/exceptions"
|
||||||
"github.com/sagernet/sing/common/ntp"
|
"github.com/sagernet/sing/common/ntp"
|
||||||
|
|
||||||
"github.com/fsnotify/fsnotify"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var errInsecureUnused = E.New("tls: insecure unused")
|
var errInsecureUnused = E.New("tls: insecure unused")
|
||||||
@ -27,7 +26,7 @@ type STDServerConfig struct {
|
|||||||
key []byte
|
key []byte
|
||||||
certificatePath string
|
certificatePath string
|
||||||
keyPath string
|
keyPath string
|
||||||
watcher *fsnotify.Watcher
|
watcher *fswatch.Watcher
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *STDServerConfig) ServerName() string {
|
func (c *STDServerConfig) ServerName() string {
|
||||||
@ -88,59 +87,37 @@ func (c *STDServerConfig) Start() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *STDServerConfig) startWatcher() error {
|
func (c *STDServerConfig) startWatcher() error {
|
||||||
watcher, err := fsnotify.NewWatcher()
|
var watchPath []string
|
||||||
|
if c.certificatePath != "" {
|
||||||
|
watchPath = append(watchPath, c.certificatePath)
|
||||||
|
}
|
||||||
|
if c.keyPath != "" {
|
||||||
|
watchPath = append(watchPath, c.keyPath)
|
||||||
|
}
|
||||||
|
watcher, err := fswatch.NewWatcher(fswatch.Options{
|
||||||
|
Path: watchPath,
|
||||||
|
Callback: func(path string) {
|
||||||
|
err := c.certificateUpdated(path)
|
||||||
|
if err != nil {
|
||||||
|
c.logger.Error(err)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if c.certificatePath != "" {
|
|
||||||
err = watcher.Add(c.certificatePath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if c.keyPath != "" {
|
|
||||||
err = watcher.Add(c.keyPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.watcher = watcher
|
c.watcher = watcher
|
||||||
go c.loopUpdate()
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *STDServerConfig) loopUpdate() {
|
func (c *STDServerConfig) certificateUpdated(path string) error {
|
||||||
for {
|
if path == c.certificatePath {
|
||||||
select {
|
|
||||||
case event, ok := <-c.watcher.Events:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if event.Op&fsnotify.Write != fsnotify.Write {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
err := c.reloadKeyPair()
|
|
||||||
if err != nil {
|
|
||||||
c.logger.Error(E.Cause(err, "reload TLS key pair"))
|
|
||||||
}
|
|
||||||
case err, ok := <-c.watcher.Errors:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.logger.Error(E.Cause(err, "fsnotify error"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *STDServerConfig) reloadKeyPair() error {
|
|
||||||
if c.certificatePath != "" {
|
|
||||||
certificate, err := os.ReadFile(c.certificatePath)
|
certificate, err := os.ReadFile(c.certificatePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return E.Cause(err, "reload certificate from ", c.certificatePath)
|
return E.Cause(err, "reload certificate from ", c.certificatePath)
|
||||||
}
|
}
|
||||||
c.certificate = certificate
|
c.certificate = certificate
|
||||||
}
|
} else if path == c.keyPath {
|
||||||
if c.keyPath != "" {
|
|
||||||
key, err := os.ReadFile(c.keyPath)
|
key, err := os.ReadFile(c.keyPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return E.Cause(err, "reload key from ", c.keyPath)
|
return E.Cause(err, "reload key from ", c.keyPath)
|
||||||
|
@ -13,14 +13,14 @@ var resourcePaths []string
|
|||||||
|
|
||||||
func FindPath(name string) (string, bool) {
|
func FindPath(name string) (string, bool) {
|
||||||
name = os.ExpandEnv(name)
|
name = os.ExpandEnv(name)
|
||||||
if rw.FileExists(name) {
|
if rw.IsFile(name) {
|
||||||
return name, true
|
return name, true
|
||||||
}
|
}
|
||||||
for _, dir := range resourcePaths {
|
for _, dir := range resourcePaths {
|
||||||
if path := filepath.Join(dir, dirName, name); rw.FileExists(path) {
|
if path := filepath.Join(dir, dirName, name); rw.IsFile(path) {
|
||||||
return path, true
|
return path, true
|
||||||
}
|
}
|
||||||
if path := filepath.Join(dir, name); rw.FileExists(path) {
|
if path := filepath.Join(dir, name); rw.IsFile(path) {
|
||||||
return path, true
|
return path, true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,9 +1,21 @@
|
|||||||
package constant
|
package constant
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ProtocolTLS = "tls"
|
ProtocolTLS = "tls"
|
||||||
ProtocolHTTP = "http"
|
ProtocolHTTP = "http"
|
||||||
ProtocolQUIC = "quic"
|
ProtocolQUIC = "quic"
|
||||||
ProtocolDNS = "dns"
|
ProtocolDNS = "dns"
|
||||||
ProtocolSTUN = "stun"
|
ProtocolSTUN = "stun"
|
||||||
|
ProtocolBitTorrent = "bittorrent"
|
||||||
|
ProtocolDTLS = "dtls"
|
||||||
|
ProtocolSSH = "ssh"
|
||||||
|
ProtocolRDP = "rdp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ClientChromium = "chromium"
|
||||||
|
ClientSafari = "safari"
|
||||||
|
ClientFirefox = "firefox"
|
||||||
|
ClientQUICGo = "quic-go"
|
||||||
|
ClientUnknown = "unknown"
|
||||||
)
|
)
|
||||||
|
@ -32,6 +32,12 @@ const (
|
|||||||
|
|
||||||
func ProxyDisplayName(proxyType string) string {
|
func ProxyDisplayName(proxyType string) string {
|
||||||
switch proxyType {
|
switch proxyType {
|
||||||
|
case TypeTun:
|
||||||
|
return "TUN"
|
||||||
|
case TypeRedirect:
|
||||||
|
return "Redirect"
|
||||||
|
case TypeTProxy:
|
||||||
|
return "TProxy"
|
||||||
case TypeDirect:
|
case TypeDirect:
|
||||||
return "Direct"
|
return "Direct"
|
||||||
case TypeBlock:
|
case TypeBlock:
|
||||||
@ -42,6 +48,8 @@ func ProxyDisplayName(proxyType string) string {
|
|||||||
return "SOCKS"
|
return "SOCKS"
|
||||||
case TypeHTTP:
|
case TypeHTTP:
|
||||||
return "HTTP"
|
return "HTTP"
|
||||||
|
case TypeMixed:
|
||||||
|
return "Mixed"
|
||||||
case TypeShadowsocks:
|
case TypeShadowsocks:
|
||||||
return "Shadowsocks"
|
return "Shadowsocks"
|
||||||
case TypeVMess:
|
case TypeVMess:
|
||||||
|
@ -11,9 +11,14 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
RuleSetTypeInline = "inline"
|
||||||
RuleSetTypeLocal = "local"
|
RuleSetTypeLocal = "local"
|
||||||
RuleSetTypeRemote = "remote"
|
RuleSetTypeRemote = "remote"
|
||||||
RuleSetVersion1 = 1
|
|
||||||
RuleSetFormatSource = "source"
|
RuleSetFormatSource = "source"
|
||||||
RuleSetFormatBinary = "binary"
|
RuleSetFormatBinary = "binary"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
RuleSetVersion1 = 1 + iota
|
||||||
|
RuleSetVersion2
|
||||||
|
)
|
||||||
|
@ -1,5 +1,3 @@
|
|||||||
//go:build go1.19
|
|
||||||
|
|
||||||
package box
|
package box
|
||||||
|
|
||||||
import (
|
import (
|
@ -1,36 +0,0 @@
|
|||||||
//go:build !go1.19
|
|
||||||
|
|
||||||
package box
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime/debug"
|
|
||||||
|
|
||||||
"github.com/sagernet/sing-box/common/conntrack"
|
|
||||||
"github.com/sagernet/sing-box/option"
|
|
||||||
)
|
|
||||||
|
|
||||||
func applyDebugOptions(options option.DebugOptions) {
|
|
||||||
applyDebugListenOption(options)
|
|
||||||
if options.GCPercent != nil {
|
|
||||||
debug.SetGCPercent(*options.GCPercent)
|
|
||||||
}
|
|
||||||
if options.MaxStack != nil {
|
|
||||||
debug.SetMaxStack(*options.MaxStack)
|
|
||||||
}
|
|
||||||
if options.MaxThreads != nil {
|
|
||||||
debug.SetMaxThreads(*options.MaxThreads)
|
|
||||||
}
|
|
||||||
if options.PanicOnFault != nil {
|
|
||||||
debug.SetPanicOnFault(*options.PanicOnFault)
|
|
||||||
}
|
|
||||||
if options.TraceBack != "" {
|
|
||||||
debug.SetTraceback(options.TraceBack)
|
|
||||||
}
|
|
||||||
if options.MemoryLimit != 0 {
|
|
||||||
// debug.SetMemoryLimit(int64(options.MemoryLimit))
|
|
||||||
conntrack.MemoryLimit = uint64(options.MemoryLimit)
|
|
||||||
}
|
|
||||||
if options.OOMKiller != nil {
|
|
||||||
conntrack.KillerEnabled = *options.OOMKiller
|
|
||||||
}
|
|
||||||
}
|
|
@ -8,6 +8,31 @@ icon: material/alert-decagram
|
|||||||
|
|
||||||
If your company or organization is willing to help us return to the App Store, please [contact us](mailto:contact@sagernet.org).
|
If your company or organization is willing to help us return to the App Store, please [contact us](mailto:contact@sagernet.org).
|
||||||
|
|
||||||
|
#### 1.10.0-beta.7
|
||||||
|
|
||||||
|
* Update quic-go to v0.47.0
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-beta.6
|
||||||
|
|
||||||
|
* Add RDP sniffer
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-beta.5
|
||||||
|
|
||||||
|
* Add PNA support for [Clash API](/configuration/experimental/clash-api/)
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-beta.3
|
||||||
|
|
||||||
|
* Add SSH sniffer
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-beta.2
|
||||||
|
|
||||||
|
* Build with go1.23
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
### 1.9.4
|
### 1.9.4
|
||||||
|
|
||||||
* Update quic-go to v0.46.0
|
* Update quic-go to v0.46.0
|
||||||
@ -23,18 +48,221 @@ icon: material/alert-decagram
|
|||||||
* Fix UDP connnection leak when sniffing
|
* Fix UDP connnection leak when sniffing
|
||||||
* Fixes and improvements
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.29
|
||||||
|
|
||||||
|
* Update quic-go to v0.46.0
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.25
|
||||||
|
|
||||||
|
* Add AdGuard DNS Filter support **1**
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
The new feature allows you to use AdGuard DNS Filter lists in a sing-box without AdGuard Home.
|
||||||
|
|
||||||
|
See [AdGuard DNS Filter](/configuration/rule-set/adguard/).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.23
|
||||||
|
|
||||||
|
* Add Chromium support for QUIC sniffer
|
||||||
|
* Add client type detect support for QUIC sniffer **1**
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
Now the QUIC sniffer can correctly extract the server name from Chromium requests and
|
||||||
|
can identify common QUIC clients, including
|
||||||
|
Chromium, Safari, Firefox, quic-go (including uquic disguised as Chrome).
|
||||||
|
|
||||||
|
See [Protocol Sniff](/configuration/route/sniff/) and [Route Rule](/configuration/route/rule/#client).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.22
|
||||||
|
|
||||||
|
* Optimize memory usages of rule-sets **1**
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
See [Source Format](/configuration/rule-set/source-format/#version).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.20
|
||||||
|
|
||||||
|
* Add DTLS sniffer
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.19
|
||||||
|
|
||||||
|
* Add `rule-set decompile` command
|
||||||
|
* Add IP address support for `rule-set match` command
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.18
|
||||||
|
|
||||||
|
* Add new `inline` rule-set type **1**
|
||||||
|
* Add auto reload support for local rule-set
|
||||||
|
* Update fsnotify usages **2**
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
The new [rule-set] type inline (which also becomes the default type)
|
||||||
|
allows you to write headless rules directly without creating a rule-set file.
|
||||||
|
|
||||||
|
[rule-set]: /configuration/rule-set/
|
||||||
|
|
||||||
|
**2**:
|
||||||
|
|
||||||
|
sing-box now uses fsnotify correctly and will not cancel watching
|
||||||
|
if the target file is deleted or recreated via rename (e.g. `mv`).
|
||||||
|
|
||||||
|
This affects all path options that support reload, including
|
||||||
|
`tls.certificate_path`, `tls.key_path`, `tls.ech.key_path` and `rule_set.path`.
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.17
|
||||||
|
|
||||||
|
* Some chaotic changes **1**
|
||||||
|
* `rule_set_ipcidr_match_source` rule items are renamed **2**
|
||||||
|
* Add `rule_set_ip_cidr_accept_empty` DNS address filter rule item **3**
|
||||||
|
* Update quic-go to v0.45.1
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
Something may be broken, please actively report problems with this version.
|
||||||
|
|
||||||
|
**2**:
|
||||||
|
|
||||||
|
`rule_set_ipcidr_match_source` route and DNS rule items are renamed to
|
||||||
|
`rule_set_ip_cidr_match_source` and will be remove in sing-box 1.11.0.
|
||||||
|
|
||||||
|
**3**:
|
||||||
|
|
||||||
|
See [DNS Rule](/configuration/dns/rule/#rule_set_ip_cidr_accept_empty).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.16
|
||||||
|
|
||||||
|
* Add custom options for `auto-route` and `auto-redirect` **1**
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
See [iproute2_table_index](/configuration/inbound/tun/#iproute2_table_index),
|
||||||
|
[iproute2_rule_index](/configuration/inbound/tun/#iproute2_rule_index),
|
||||||
|
[auto_redirect_input_mark](/configuration/inbound/tun/#auto_redirect_input_mark) and
|
||||||
|
[auto_redirect_output_mark](/configuration/inbound/tun/#auto_redirect_output_mark).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.13
|
||||||
|
|
||||||
|
* TUN address fields are merged **1**
|
||||||
|
* Add route address set support for auto-redirect **2**
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
See [Migration](/migration/#tun-address-fields-are-merged).
|
||||||
|
|
||||||
|
**2**:
|
||||||
|
|
||||||
|
The new feature will allow you to configure the destination IP CIDR rules
|
||||||
|
in the specified rule-sets to the firewall automatically.
|
||||||
|
|
||||||
|
Specified or unspecified destinations will bypass the sing-box routes to get better performance
|
||||||
|
(for example, keep hardware offloading of direct traffics on the router).
|
||||||
|
|
||||||
|
See [route_address_set](/configuration/inbound/tun/#route_address_set)
|
||||||
|
and [route_exclude_address_set](/configuration/inbound/tun/#route_exclude_address_set).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.12
|
||||||
|
|
||||||
|
* Fix auto-redirect not configuring nftables forward chain correctly
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
### 1.9.3
|
### 1.9.3
|
||||||
|
|
||||||
* Fixes and improvements
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.10
|
||||||
|
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
### 1.9.2
|
### 1.9.2
|
||||||
|
|
||||||
* Fixes and improvements
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.8
|
||||||
|
|
||||||
|
* Drop support for go1.18 and go1.19 **1**
|
||||||
|
* Update quic-go to v0.45.0
|
||||||
|
* Update Hysteria2 BBR congestion control
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
Due to maintenance difficulties, sing-box 1.10.0 requires at least Go 1.20 to compile.
|
||||||
|
|
||||||
### 1.9.1
|
### 1.9.1
|
||||||
|
|
||||||
* Fixes and improvements
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.7
|
||||||
|
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.5
|
||||||
|
|
||||||
|
* Improve auto-redirect **1**
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
nftables support and DNS hijacking has been added.
|
||||||
|
|
||||||
|
Tun inbounds with `auto_route` and `auto_redirect` now works as expected on routers **without intervention**.
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.4
|
||||||
|
|
||||||
|
* Fix auto-redirect **1**
|
||||||
|
* Improve auto-route on linux **2**
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
Tun inbounds with `auto_route` and `auto_redirect` now works as expected on routers.
|
||||||
|
|
||||||
|
**2**:
|
||||||
|
|
||||||
|
Tun inbounds with `auto_route` and `strict_route` now works as expected on routers and servers,
|
||||||
|
but the usages of [exclude_interface](/configuration/inbound/tun/#exclude_interface) need to be updated.
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.2
|
||||||
|
|
||||||
|
* Move auto-redirect to Tun **1**
|
||||||
|
* Fixes and improvements
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
Linux support are added.
|
||||||
|
|
||||||
|
See [Tun](/configuration/inbound/tun/#auto_redirect).
|
||||||
|
|
||||||
|
#### 1.10.0-alpha.1
|
||||||
|
|
||||||
|
* Add tailing comma support in JSON configuration
|
||||||
|
* Add simple auto-redirect for Android **1**
|
||||||
|
* Add BitTorrent sniffer **2**
|
||||||
|
|
||||||
|
**1**:
|
||||||
|
|
||||||
|
It allows you to use redirect inbound in the sing-box Android client
|
||||||
|
and automatically configures IPv4 TCP redirection via su.
|
||||||
|
|
||||||
|
This may alleviate the symptoms of some OCD patients who think that
|
||||||
|
redirect can effectively save power compared to the system HTTP Proxy.
|
||||||
|
|
||||||
|
See [Redirect](/configuration/inbound/redirect/).
|
||||||
|
|
||||||
|
**2**:
|
||||||
|
|
||||||
|
See [Protocol Sniff](/configuration/route/sniff/).
|
||||||
|
|
||||||
### 1.9.0
|
### 1.9.0
|
||||||
|
|
||||||
* Fixes and improvements
|
* Fixes and improvements
|
||||||
@ -338,7 +566,7 @@ See [Address Filter Fields](/configuration/dns/rule#address-filter-fields).
|
|||||||
Important changes since 1.7:
|
Important changes since 1.7:
|
||||||
|
|
||||||
* Migrate cache file from Clash API to independent options **1**
|
* Migrate cache file from Clash API to independent options **1**
|
||||||
* Introducing [Rule Set](/configuration/rule-set/) **2**
|
* Introducing [rule-set](/configuration/rule-set/) **2**
|
||||||
* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
|
* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
|
||||||
* Allow nested logical rules **4**
|
* Allow nested logical rules **4**
|
||||||
* Independent `source_ip_is_private` and `ip_is_private` rules **5**
|
* Independent `source_ip_is_private` and `ip_is_private` rules **5**
|
||||||
@ -358,7 +586,7 @@ See [Cache File](/configuration/experimental/cache-file/) and
|
|||||||
|
|
||||||
**2**:
|
**2**:
|
||||||
|
|
||||||
Rule set is independent collections of rules that can be compiled into binaries to improve performance.
|
rule-set is independent collections of rules that can be compiled into binaries to improve performance.
|
||||||
Compared to legacy GeoIP and Geosite resources,
|
Compared to legacy GeoIP and Geosite resources,
|
||||||
it can include more types of rules, load faster,
|
it can include more types of rules, load faster,
|
||||||
use less memory, and update automatically.
|
use less memory, and update automatically.
|
||||||
@ -366,16 +594,16 @@ use less memory, and update automatically.
|
|||||||
See [Route#rule_set](/configuration/route/#rule_set),
|
See [Route#rule_set](/configuration/route/#rule_set),
|
||||||
[Route Rule](/configuration/route/rule/),
|
[Route Rule](/configuration/route/rule/),
|
||||||
[DNS Rule](/configuration/dns/rule/),
|
[DNS Rule](/configuration/dns/rule/),
|
||||||
[Rule Set](/configuration/rule-set/),
|
[rule-set](/configuration/rule-set/),
|
||||||
[Source Format](/configuration/rule-set/source-format/) and
|
[Source Format](/configuration/rule-set/source-format/) and
|
||||||
[Headless Rule](/configuration/rule-set/headless-rule/).
|
[Headless Rule](/configuration/rule-set/headless-rule/).
|
||||||
|
|
||||||
For GEO resources migration, see [Migrate GeoIP to rule sets](/migration/#migrate-geoip-to-rule-sets) and
|
For GEO resources migration, see [Migrate GeoIP to rule-sets](/migration/#migrate-geoip-to-rule-sets) and
|
||||||
[Migrate Geosite to rule sets](/migration/#migrate-geosite-to-rule-sets).
|
[Migrate Geosite to rule-sets](/migration/#migrate-geosite-to-rule-sets).
|
||||||
|
|
||||||
**3**:
|
**3**:
|
||||||
|
|
||||||
New commands manage GeoIP, Geosite and rule set resources, and help you migrate GEO resources to rule sets.
|
New commands manage GeoIP, Geosite and rule-set resources, and help you migrate GEO resources to rule-sets.
|
||||||
|
|
||||||
**4**:
|
**4**:
|
||||||
|
|
||||||
@ -572,7 +800,7 @@ This change is intended to break incorrect usage and essentially requires no act
|
|||||||
|
|
||||||
**1**:
|
**1**:
|
||||||
|
|
||||||
Now the rules in the `rule_set` rule item can be logically considered to be merged into the rule using rule sets,
|
Now the rules in the `rule_set` rule item can be logically considered to be merged into the rule using rule-sets,
|
||||||
rather than completely following the AND logic.
|
rather than completely following the AND logic.
|
||||||
|
|
||||||
#### 1.8.0-alpha.5
|
#### 1.8.0-alpha.5
|
||||||
@ -588,7 +816,7 @@ Since GeoIP was deprecated, we made this rule independent, see [Migration](/migr
|
|||||||
#### 1.8.0-alpha.1
|
#### 1.8.0-alpha.1
|
||||||
|
|
||||||
* Migrate cache file from Clash API to independent options **1**
|
* Migrate cache file from Clash API to independent options **1**
|
||||||
* Introducing [Rule Set](/configuration/rule-set/) **2**
|
* Introducing [rule-set](/configuration/rule-set/) **2**
|
||||||
* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
|
* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
|
||||||
* Allow nested logical rules **4**
|
* Allow nested logical rules **4**
|
||||||
|
|
||||||
@ -599,7 +827,7 @@ See [Cache File](/configuration/experimental/cache-file/) and
|
|||||||
|
|
||||||
**2**:
|
**2**:
|
||||||
|
|
||||||
Rule set is independent collections of rules that can be compiled into binaries to improve performance.
|
rule-set is independent collections of rules that can be compiled into binaries to improve performance.
|
||||||
Compared to legacy GeoIP and Geosite resources,
|
Compared to legacy GeoIP and Geosite resources,
|
||||||
it can include more types of rules, load faster,
|
it can include more types of rules, load faster,
|
||||||
use less memory, and update automatically.
|
use less memory, and update automatically.
|
||||||
@ -607,16 +835,16 @@ use less memory, and update automatically.
|
|||||||
See [Route#rule_set](/configuration/route/#rule_set),
|
See [Route#rule_set](/configuration/route/#rule_set),
|
||||||
[Route Rule](/configuration/route/rule/),
|
[Route Rule](/configuration/route/rule/),
|
||||||
[DNS Rule](/configuration/dns/rule/),
|
[DNS Rule](/configuration/dns/rule/),
|
||||||
[Rule Set](/configuration/rule-set/),
|
[rule-set](/configuration/rule-set/),
|
||||||
[Source Format](/configuration/rule-set/source-format/) and
|
[Source Format](/configuration/rule-set/source-format/) and
|
||||||
[Headless Rule](/configuration/rule-set/headless-rule/).
|
[Headless Rule](/configuration/rule-set/headless-rule/).
|
||||||
|
|
||||||
For GEO resources migration, see [Migrate GeoIP to rule sets](/migration/#migrate-geoip-to-rule-sets) and
|
For GEO resources migration, see [Migrate GeoIP to rule-sets](/migration/#migrate-geoip-to-rule-sets) and
|
||||||
[Migrate Geosite to rule sets](/migration/#migrate-geosite-to-rule-sets).
|
[Migrate Geosite to rule-sets](/migration/#migrate-geosite-to-rule-sets).
|
||||||
|
|
||||||
**3**:
|
**3**:
|
||||||
|
|
||||||
New commands manage GeoIP, Geosite and rule set resources, and help you migrate GEO resources to rule sets.
|
New commands manage GeoIP, Geosite and rule-set resources, and help you migrate GEO resources to rule-sets.
|
||||||
|
|
||||||
**4**:
|
**4**:
|
||||||
|
|
||||||
|
@ -2,6 +2,12 @@
|
|||||||
icon: material/new-box
|
icon: material/new-box
|
||||||
---
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-delete-clock: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_match_source](#rule_set_ip_cidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_accept_empty](#rule_set_ip_cidr_accept_empty)
|
||||||
|
|
||||||
!!! quote "Changes in sing-box 1.9.0"
|
!!! quote "Changes in sing-box 1.9.0"
|
||||||
|
|
||||||
:material-plus: [geoip](#geoip)
|
:material-plus: [geoip](#geoip)
|
||||||
@ -117,7 +123,10 @@ icon: material/new-box
|
|||||||
"geoip-cn",
|
"geoip-cn",
|
||||||
"geosite-cn"
|
"geosite-cn"
|
||||||
],
|
],
|
||||||
|
// deprecated
|
||||||
"rule_set_ipcidr_match_source": false,
|
"rule_set_ipcidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_accept_empty": false,
|
||||||
"invert": false,
|
"invert": false,
|
||||||
"outbound": [
|
"outbound": [
|
||||||
"direct"
|
"direct"
|
||||||
@ -157,7 +166,7 @@ icon: material/new-box
|
|||||||
(`source_port` || `source_port_range`) &&
|
(`source_port` || `source_port_range`) &&
|
||||||
`other fields`
|
`other fields`
|
||||||
|
|
||||||
Additionally, included rule sets can be considered merged rather than as a single rule sub-item.
|
Additionally, included rule-sets can be considered merged rather than as a single rule sub-item.
|
||||||
|
|
||||||
#### inbound
|
#### inbound
|
||||||
|
|
||||||
@ -303,13 +312,23 @@ Match WiFi BSSID.
|
|||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
Match [Rule Set](/configuration/route/#rule_set).
|
Match [rule-set](/configuration/route/#rule_set).
|
||||||
|
|
||||||
#### rule_set_ipcidr_match_source
|
#### rule_set_ipcidr_match_source
|
||||||
|
|
||||||
!!! question "Since sing-box 1.9.0"
|
!!! question "Since sing-box 1.9.0"
|
||||||
|
|
||||||
Make `ipcidr` in rule sets match the source IP.
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`rule_set_ipcidr_match_source` is renamed to `rule_set_ip_cidr_match_source` and will be remove in sing-box 1.11.0.
|
||||||
|
|
||||||
|
Make `ip_cidr` rule items in rule-sets match the source IP.
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_match_source
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Make `ip_cidr` rule items in rule-sets match the source IP.
|
||||||
|
|
||||||
#### invert
|
#### invert
|
||||||
|
|
||||||
@ -347,11 +366,11 @@ Will overrides `dns.client_subnet` and `servers.[].client_subnet`.
|
|||||||
|
|
||||||
### Address Filter Fields
|
### Address Filter Fields
|
||||||
|
|
||||||
Only takes effect for IP address requests. When the query results do not match the address filtering rule items, the current rule will be skipped.
|
Only takes effect for address requests (A/AAAA/HTTPS). When the query results do not match the address filtering rule items, the current rule will be skipped.
|
||||||
|
|
||||||
!!! info ""
|
!!! info ""
|
||||||
|
|
||||||
`ip_cidr` items in included rule sets also takes effect as an address filtering field.
|
`ip_cidr` items in included rule-sets also takes effect as an address filtering field.
|
||||||
|
|
||||||
!!! note ""
|
!!! note ""
|
||||||
|
|
||||||
@ -375,6 +394,12 @@ Match IP CIDR with query response.
|
|||||||
|
|
||||||
Match private IP with query response.
|
Match private IP with query response.
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_accept_empty
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Make `ip_cidr` rules in rule-sets accept empty query response.
|
||||||
|
|
||||||
### Logical Fields
|
### Logical Fields
|
||||||
|
|
||||||
#### type
|
#### type
|
||||||
|
@ -2,6 +2,12 @@
|
|||||||
icon: material/new-box
|
icon: material/new-box
|
||||||
---
|
---
|
||||||
|
|
||||||
|
!!! quote "sing-box 1.10.0 中的更改"
|
||||||
|
|
||||||
|
:material-delete-clock: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_match_source](#rule_set_ip_cidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_accept_empty](#rule_set_ip_cidr_accept_empty)
|
||||||
|
|
||||||
!!! quote "sing-box 1.9.0 中的更改"
|
!!! quote "sing-box 1.9.0 中的更改"
|
||||||
|
|
||||||
:material-plus: [geoip](#geoip)
|
:material-plus: [geoip](#geoip)
|
||||||
@ -117,7 +123,10 @@ icon: material/new-box
|
|||||||
"geoip-cn",
|
"geoip-cn",
|
||||||
"geosite-cn"
|
"geosite-cn"
|
||||||
],
|
],
|
||||||
|
// 已弃用
|
||||||
"rule_set_ipcidr_match_source": false,
|
"rule_set_ipcidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_accept_empty": false,
|
||||||
"invert": false,
|
"invert": false,
|
||||||
"outbound": [
|
"outbound": [
|
||||||
"direct"
|
"direct"
|
||||||
@ -307,7 +316,17 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
|||||||
|
|
||||||
!!! question "自 sing-box 1.9.0 起"
|
!!! question "自 sing-box 1.9.0 起"
|
||||||
|
|
||||||
使规则集中的 `ipcidr` 规则匹配源 IP。
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`rule_set_ipcidr_match_source` 已重命名为 `rule_set_ip_cidr_match_source` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
|
使规则集中的 `ip_cidr` 规则匹配源 IP。
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_match_source
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
使规则集中的 `ip_cidr` 规则匹配源 IP。
|
||||||
|
|
||||||
#### invert
|
#### invert
|
||||||
|
|
||||||
@ -345,7 +364,7 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
|||||||
|
|
||||||
### 地址筛选字段
|
### 地址筛选字段
|
||||||
|
|
||||||
仅对IP地址请求生效。 当查询结果与地址筛选规则项不匹配时,将跳过当前规则。
|
仅对地址请求 (A/AAAA/HTTPS) 生效。 当查询结果与地址筛选规则项不匹配时,将跳过当前规则。
|
||||||
|
|
||||||
!!! info ""
|
!!! info ""
|
||||||
|
|
||||||
@ -365,7 +384,7 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
|||||||
|
|
||||||
!!! question "自 sing-box 1.9.0 起"
|
!!! question "自 sing-box 1.9.0 起"
|
||||||
|
|
||||||
与查询相应匹配 IP CIDR。
|
与查询响应匹配 IP CIDR。
|
||||||
|
|
||||||
#### ip_is_private
|
#### ip_is_private
|
||||||
|
|
||||||
@ -373,6 +392,12 @@ DNS 查询类型。值可以为整数或者类型名称字符串。
|
|||||||
|
|
||||||
与查询响应匹配非公开 IP。
|
与查询响应匹配非公开 IP。
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_accept_empty
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
使规则集中的 `ip_cidr` 规则接受空查询响应。
|
||||||
|
|
||||||
### 逻辑字段
|
### 逻辑字段
|
||||||
|
|
||||||
#### type
|
#### type
|
||||||
|
@ -1,3 +1,12 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: [access_control_allow_origin](#access_control_allow_origin)
|
||||||
|
:material-plus: [access_control_allow_private_network](#access_control_allow_private_network)
|
||||||
|
|
||||||
!!! quote "Changes in sing-box 1.8.0"
|
!!! quote "Changes in sing-box 1.8.0"
|
||||||
|
|
||||||
:material-delete-alert: [store_mode](#store_mode)
|
:material-delete-alert: [store_mode](#store_mode)
|
||||||
@ -8,24 +17,59 @@
|
|||||||
|
|
||||||
### Structure
|
### Structure
|
||||||
|
|
||||||
```json
|
=== "Structure"
|
||||||
{
|
|
||||||
"external_controller": "127.0.0.1:9090",
|
```json
|
||||||
"external_ui": "",
|
{
|
||||||
"external_ui_download_url": "",
|
"external_controller": "127.0.0.1:9090",
|
||||||
"external_ui_download_detour": "",
|
"external_ui": "",
|
||||||
"secret": "",
|
"external_ui_download_url": "",
|
||||||
"default_mode": "",
|
"external_ui_download_detour": "",
|
||||||
|
"secret": "",
|
||||||
// Deprecated
|
"default_mode": "",
|
||||||
|
"access_control_allow_origin": [],
|
||||||
"store_mode": false,
|
"access_control_allow_private_network": false,
|
||||||
"store_selected": false,
|
|
||||||
"store_fakeip": false,
|
// Deprecated
|
||||||
"cache_file": "",
|
|
||||||
"cache_id": ""
|
"store_mode": false,
|
||||||
}
|
"store_selected": false,
|
||||||
```
|
"store_fakeip": false,
|
||||||
|
"cache_file": "",
|
||||||
|
"cache_id": ""
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Example (online)"
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"external_controller": "127.0.0.1:9090",
|
||||||
|
"access_control_allow_origin": [
|
||||||
|
"http://127.0.0.1",
|
||||||
|
"http://yacd.haishan.me"
|
||||||
|
],
|
||||||
|
"access_control_allow_private_network": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Example (download)"
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"external_controller": "0.0.0.0:9090",
|
||||||
|
"external_ui": "dashboard"
|
||||||
|
// external_ui_download_detour: "direct"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
You can ignore the JSON Array [] tag when the content is only one item
|
||||||
|
|
||||||
### Fields
|
### Fields
|
||||||
|
|
||||||
@ -63,6 +107,22 @@ Default mode in clash, `Rule` will be used if empty.
|
|||||||
|
|
||||||
This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.
|
This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.
|
||||||
|
|
||||||
|
#### access_control_allow_origin
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
CORS allowed origins, `*` will be used if empty.
|
||||||
|
|
||||||
|
To access the Clash API on a private network from a public website, you must explicitly specify it in `access_control_allow_origin` instead of using `*`.
|
||||||
|
|
||||||
|
#### access_control_allow_private_network
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Allow access from private network.
|
||||||
|
|
||||||
|
To access the Clash API on a private network from a public website, `access_control_allow_private_network` must be enabled.
|
||||||
|
|
||||||
#### store_mode
|
#### store_mode
|
||||||
|
|
||||||
!!! failure "Deprecated in sing-box 1.8.0"
|
!!! failure "Deprecated in sing-box 1.8.0"
|
||||||
|
@ -1,3 +1,12 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "sing-box 1.10.0 中的更改"
|
||||||
|
|
||||||
|
:material-plus: [access_control_allow_origin](#access_control_allow_origin)
|
||||||
|
:material-plus: [access_control_allow_private_network](#access_control_allow_private_network)
|
||||||
|
|
||||||
!!! quote "sing-box 1.8.0 中的更改"
|
!!! quote "sing-box 1.8.0 中的更改"
|
||||||
|
|
||||||
:material-delete-alert: [store_mode](#store_mode)
|
:material-delete-alert: [store_mode](#store_mode)
|
||||||
@ -8,24 +17,59 @@
|
|||||||
|
|
||||||
### 结构
|
### 结构
|
||||||
|
|
||||||
```json
|
=== "结构"
|
||||||
{
|
|
||||||
"external_controller": "127.0.0.1:9090",
|
```json
|
||||||
"external_ui": "",
|
{
|
||||||
"external_ui_download_url": "",
|
"external_controller": "127.0.0.1:9090",
|
||||||
"external_ui_download_detour": "",
|
"external_ui": "",
|
||||||
"secret": "",
|
"external_ui_download_url": "",
|
||||||
"default_mode": "",
|
"external_ui_download_detour": "",
|
||||||
|
"secret": "",
|
||||||
// Deprecated
|
"default_mode": "",
|
||||||
|
"access_control_allow_origin": [],
|
||||||
"store_mode": false,
|
"access_control_allow_private_network": false,
|
||||||
"store_selected": false,
|
|
||||||
"store_fakeip": false,
|
// Deprecated
|
||||||
"cache_file": "",
|
|
||||||
"cache_id": ""
|
"store_mode": false,
|
||||||
}
|
"store_selected": false,
|
||||||
```
|
"store_fakeip": false,
|
||||||
|
"cache_file": "",
|
||||||
|
"cache_id": ""
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "示例 (在线)"
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"external_controller": "127.0.0.1:9090",
|
||||||
|
"access_control_allow_origin": [
|
||||||
|
"http://127.0.0.1",
|
||||||
|
"http://yacd.haishan.me"
|
||||||
|
],
|
||||||
|
"access_control_allow_private_network": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "示例 (下载)"
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"external_controller": "0.0.0.0:9090",
|
||||||
|
"external_ui": "dashboard"
|
||||||
|
// external_ui_download_detour: "direct"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
当内容只有一项时,可以忽略 JSON 数组 [] 标签
|
||||||
|
|
||||||
### Fields
|
### Fields
|
||||||
|
|
||||||
@ -61,6 +105,22 @@ Clash 中的默认模式,默认使用 `Rule`。
|
|||||||
|
|
||||||
此设置没有直接影响,但可以通过 `clash_mode` 规则项在路由和 DNS 规则中使用。
|
此设置没有直接影响,但可以通过 `clash_mode` 规则项在路由和 DNS 规则中使用。
|
||||||
|
|
||||||
|
#### access_control_allow_origin
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
允许的 CORS 来源,默认使用 `*`。
|
||||||
|
|
||||||
|
要从公共网站访问私有网络上的 Clash API,必须在 `access_control_allow_origin` 中明确指定它而不是使用 `*`。
|
||||||
|
|
||||||
|
#### access_control_allow_private_network
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
允许从私有网络访问。
|
||||||
|
|
||||||
|
要从公共网站访问私有网络上的 Clash API,必须启用 `access_control_allow_private_network`。
|
||||||
|
|
||||||
#### store_mode
|
#### store_mode
|
||||||
|
|
||||||
!!! failure "已在 sing-box 1.8.0 废弃"
|
!!! failure "已在 sing-box 1.8.0 废弃"
|
||||||
|
@ -2,6 +2,25 @@
|
|||||||
icon: material/new-box
|
icon: material/new-box
|
||||||
---
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: [address](#address)
|
||||||
|
:material-delete-clock: [inet4_address](#inet4_address)
|
||||||
|
:material-delete-clock: [inet6_address](#inet6_address)
|
||||||
|
:material-plus: [route_address](#route_address)
|
||||||
|
:material-delete-clock: [inet4_route_address](#inet4_route_address)
|
||||||
|
:material-delete-clock: [inet6_route_address](#inet6_route_address)
|
||||||
|
:material-plus: [route_exclude_address](#route_exclude_address)
|
||||||
|
:material-delete-clock: [inet4_route_exclude_address](#inet4_route_exclude_address)
|
||||||
|
:material-delete-clock: [inet6_route_exclude_address](#inet6_route_exclude_address)
|
||||||
|
:material-plus: [iproute2_table_index](#iproute2_table_index)
|
||||||
|
:material-plus: [iproute2_rule_index](#iproute2_rule_index)
|
||||||
|
:material-plus: [auto_redirect](#auto_redirect)
|
||||||
|
:material-plus: [auto_redirect_input_mark](#auto_redirect_input_mark)
|
||||||
|
:material-plus: [auto_redirect_output_mark](#auto_redirect_output_mark)
|
||||||
|
:material-plus: [route_address_set](#route_address_set)
|
||||||
|
:material-plus: [route_exclude_address_set](#route_exclude_address_set)
|
||||||
|
|
||||||
!!! quote "Changes in sing-box 1.9.0"
|
!!! quote "Changes in sing-box 1.9.0"
|
||||||
|
|
||||||
:material-plus: [platform.http_proxy.bypass_domain](#platformhttp_proxybypass_domain)
|
:material-plus: [platform.http_proxy.bypass_domain](#platformhttp_proxybypass_domain)
|
||||||
@ -23,26 +42,61 @@ icon: material/new-box
|
|||||||
"type": "tun",
|
"type": "tun",
|
||||||
"tag": "tun-in",
|
"tag": "tun-in",
|
||||||
"interface_name": "tun0",
|
"interface_name": "tun0",
|
||||||
"inet4_address": "172.19.0.1/30",
|
"address": [
|
||||||
"inet6_address": "fdfe:dcba:9876::1/126",
|
"172.18.0.1/30",
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
|
// deprecated
|
||||||
|
"inet4_address": [
|
||||||
|
"172.19.0.1/30"
|
||||||
|
],
|
||||||
|
// deprecated
|
||||||
|
"inet6_address": [
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
"mtu": 9000,
|
"mtu": 9000,
|
||||||
"gso": false,
|
"gso": false,
|
||||||
"auto_route": true,
|
"auto_route": true,
|
||||||
|
"iproute2_table_index": 2022,
|
||||||
|
"iproute2_rule_index": 9000,
|
||||||
|
"auto_redirect": false,
|
||||||
|
"auto_redirect_input_mark": "0x2023",
|
||||||
|
"auto_redirect_output_mark": "0x2024",
|
||||||
"strict_route": true,
|
"strict_route": true,
|
||||||
|
"route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1",
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
// deprecated
|
||||||
"inet4_route_address": [
|
"inet4_route_address": [
|
||||||
"0.0.0.0/1",
|
"0.0.0.0/1",
|
||||||
"128.0.0.0/1"
|
"128.0.0.0/1"
|
||||||
],
|
],
|
||||||
|
// deprecated
|
||||||
"inet6_route_address": [
|
"inet6_route_address": [
|
||||||
"::/1",
|
"::/1",
|
||||||
"8000::/1"
|
"8000::/1"
|
||||||
],
|
],
|
||||||
|
"route_exclude_address": [
|
||||||
|
"192.168.0.0/16",
|
||||||
|
"fc00::/7"
|
||||||
|
],
|
||||||
|
// deprecated
|
||||||
"inet4_route_exclude_address": [
|
"inet4_route_exclude_address": [
|
||||||
"192.168.0.0/16"
|
"192.168.0.0/16"
|
||||||
],
|
],
|
||||||
|
// deprecated
|
||||||
"inet6_route_exclude_address": [
|
"inet6_route_exclude_address": [
|
||||||
"fc00::/7"
|
"fc00::/7"
|
||||||
],
|
],
|
||||||
|
"route_address_set": [
|
||||||
|
"geoip-cloudflare"
|
||||||
|
],
|
||||||
|
"route_exclude_address_set": [
|
||||||
|
"geoip-cn"
|
||||||
|
],
|
||||||
"endpoint_independent_nat": false,
|
"endpoint_independent_nat": false,
|
||||||
"udp_timeout": "5m",
|
"udp_timeout": "5m",
|
||||||
"stack": "system",
|
"stack": "system",
|
||||||
@ -83,8 +137,8 @@ icon: material/new-box
|
|||||||
"match_domain": []
|
"match_domain": []
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
...
|
||||||
... // Listen Fields
|
// Listen Fields
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -102,14 +156,26 @@ icon: material/new-box
|
|||||||
|
|
||||||
Virtual device name, automatically selected if empty.
|
Virtual device name, automatically selected if empty.
|
||||||
|
|
||||||
|
#### address
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
IPv4 and IPv6 prefix for the tun interface.
|
||||||
|
|
||||||
#### inet4_address
|
#### inet4_address
|
||||||
|
|
||||||
==Required==
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet4_address` is merged to `address` and will be removed in sing-box 1.11.0.
|
||||||
|
|
||||||
IPv4 prefix for the tun interface.
|
IPv4 prefix for the tun interface.
|
||||||
|
|
||||||
#### inet6_address
|
#### inet6_address
|
||||||
|
|
||||||
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet6_address` is merged to `address` and will be removed in sing-box 1.11.0.
|
||||||
|
|
||||||
IPv6 prefix for the tun interface.
|
IPv6 prefix for the tun interface.
|
||||||
|
|
||||||
#### mtu
|
#### mtu
|
||||||
@ -122,7 +188,7 @@ The maximum transmission unit.
|
|||||||
|
|
||||||
!!! quote ""
|
!!! quote ""
|
||||||
|
|
||||||
Only supported on Linux.
|
Only supported on Linux with `auto_route` enabled.
|
||||||
|
|
||||||
Enable generic segmentation offload.
|
Enable generic segmentation offload.
|
||||||
|
|
||||||
@ -138,6 +204,57 @@ Set the default route to the Tun.
|
|||||||
|
|
||||||
By default, VPN takes precedence over tun. To make tun go through VPN, enable `route.override_android_vpn`.
|
By default, VPN takes precedence over tun. To make tun go through VPN, enable `route.override_android_vpn`.
|
||||||
|
|
||||||
|
#### iproute2_table_index
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Linux iproute2 table index generated by `auto_route`.
|
||||||
|
|
||||||
|
`2022` is used by default.
|
||||||
|
|
||||||
|
#### iproute2_rule_index
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Linux iproute2 rule start index generated by `auto_route`.
|
||||||
|
|
||||||
|
`9000` is used by default.
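
A hedged sketch of overriding both indexes so the generated table and rules do not collide with an existing policy-routing setup; the tag, address and index values are placeholders, not recommended defaults:

```json
{
  "type": "tun",
  "tag": "tun-in",
  "address": ["172.18.0.1/30"],
  "auto_route": true,
  "iproute2_table_index": 2077,
  "iproute2_rule_index": 9100
}
```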
|
||||||
|
|
||||||
|
#### auto_redirect
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
Only supported on Linux with `auto_route` enabled.
|
||||||
|
|
||||||
|
Automatically configure iptables/nftables to redirect connections.
|
||||||
|
|
||||||
|
*In Android*:
|
||||||
|
|
||||||
|
Only local connections are forwarded. To share your VPN connection over hotspot or repeater,
|
||||||
|
use [VPNHotspot](https://github.com/Mygod/VPNHotspot).
|
||||||
|
|
||||||
|
*In Linux*:
|
||||||
|
|
||||||
|
`auto_route` with `auto_redirect` now works as expected on routers **without intervention**.
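
A minimal sketch of that router scenario, assuming a Linux host; the tag and interface addresses are placeholders:

```json
{
  "type": "tun",
  "tag": "tun-in",
  "address": [
    "172.18.0.1/30",
    "fdfe:dcba:9876::1/126"
  ],
  "auto_route": true,
  "auto_redirect": true
}
```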
|
||||||
|
|
||||||
|
#### auto_redirect_input_mark
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Connection input mark used by `route_address_set` and `route_exclude_address_set`.
|
||||||
|
|
||||||
|
`0x2023` is used by default.
|
||||||
|
|
||||||
|
#### auto_redirect_output_mark
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Connection output mark used by `route_address_set` and `route_exclude_address_set`.
|
||||||
|
|
||||||
|
`0x2024` is used by default.
|
||||||
|
|
||||||
#### strict_route
|
#### strict_route
|
||||||
|
|
||||||
Enforce strict routing rules when `auto_route` is enabled:
|
Enforce strict routing rules when `auto_route` is enabled:
|
||||||
@ -145,9 +262,10 @@ Enforce strict routing rules when `auto_route` is enabled:
|
|||||||
*In Linux*:
|
*In Linux*:
|
||||||
|
|
||||||
* Let unsupported network unreachable
|
* Let unsupported network unreachable
|
||||||
|
* Make ICMP traffic route to tun instead of upstream interfaces
|
||||||
* Route all connections to tun
|
* Route all connections to tun
|
||||||
|
|
||||||
It prevents address leaks and makes DNS hijacking work on Android.
|
It prevents IP address leaks and makes DNS hijacking work on Android.
|
||||||
|
|
||||||
*In Windows*:
|
*In Windows*:
|
||||||
|
|
||||||
@ -156,22 +274,80 @@ It prevents address leaks and makes DNS hijacking work on Android.
|
|||||||
|
|
||||||
It may prevent some applications (such as VirtualBox) from working properly in certain situations.
|
It may prevent some applications (such as VirtualBox) from working properly in certain situations.
|
||||||
|
|
||||||
|
#### route_address
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Use custom routes instead of default when `auto_route` is enabled.
|
||||||
|
|
||||||
#### inet4_route_address
|
#### inet4_route_address
|
||||||
|
|
||||||
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet4_route_address` is deprecated and will be removed in sing-box 1.11.0, please use [route_address](#route_address)
|
||||||
|
instead.
|
||||||
|
|
||||||
Use custom routes instead of default when `auto_route` is enabled.
|
Use custom routes instead of default when `auto_route` is enabled.
|
||||||
|
|
||||||
#### inet6_route_address
|
#### inet6_route_address
|
||||||
|
|
||||||
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet6_route_address` is deprecated and will be removed in sing-box 1.11.0, please use [route_address](#route_address)
|
||||||
|
instead.
|
||||||
|
|
||||||
Use custom routes instead of default when `auto_route` is enabled.
|
Use custom routes instead of default when `auto_route` is enabled.
|
||||||
|
|
||||||
|
#### route_exclude_address
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Exclude custom routes when `auto_route` is enabled.
|
||||||
|
|
||||||
#### inet4_route_exclude_address
|
#### inet4_route_exclude_address
|
||||||
|
|
||||||
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet4_route_exclude_address` is deprecated and will be removed in sing-box 1.11.0, please
|
||||||
|
use [route_exclude_address](#route_exclude_address) instead.
|
||||||
|
|
||||||
Exclude custom routes when `auto_route` is enabled.
|
Exclude custom routes when `auto_route` is enabled.
|
||||||
|
|
||||||
#### inet6_route_exclude_address
|
#### inet6_route_exclude_address
|
||||||
|
|
||||||
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`inet6_route_exclude_address` is deprecated and will be removed in sing-box 1.11.0, please
|
||||||
|
use [route_exclude_address](#route_exclude_address) instead.
|
||||||
|
|
||||||
Exclude custom routes when `auto_route` is enabled.
|
Exclude custom routes when `auto_route` is enabled.
|
||||||
|
|
||||||
|
#### route_address_set
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
Only supported on Linux with nftables and requires `auto_route` and `auto_redirect` enabled.
|
||||||
|
|
||||||
|
Add the destination IP CIDR rules in the specified rule-sets to the firewall.
|
||||||
|
Unmatched traffic will bypass the sing-box routes.
|
||||||
|
|
||||||
|
Conflict with `route.default_mark` and `[dialOptions].routing_mark`.
|
||||||
|
|
||||||
|
#### route_exclude_address_set
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
Only supported on Linux with nftables and requires `auto_route` and `auto_redirect` enabled.
|
||||||
|
|
||||||
|
Add the destination IP CIDR rules in the specified rule-sets to the firewall.
|
||||||
|
Matched traffic will bypass the sing-box routes.
|
||||||
|
|
||||||
|
Conflict with `route.default_mark` and `[dialOptions].routing_mark`.
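
To tie the two fields together, a hedged sketch of the required combination on Linux with nftables; the rule-set tags are placeholders and are assumed to be defined under `route.rule_set`:

```json
{
  "type": "tun",
  "tag": "tun-in",
  "address": ["172.18.0.1/30"],
  "auto_route": true,
  "auto_redirect": true,
  "route_address_set": ["geoip-cloudflare"],
  "route_exclude_address_set": ["geoip-cn"]
}
```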
|
||||||
|
|
||||||
#### endpoint_independent_nat
|
#### endpoint_independent_nat
|
||||||
|
|
||||||
!!! info ""
|
!!! info ""
|
||||||
@ -214,6 +390,10 @@ Conflict with `exclude_interface`.
|
|||||||
|
|
||||||
#### exclude_interface
|
#### exclude_interface
|
||||||
|
|
||||||
|
!!! warning ""
|
||||||
|
|
||||||
|
When `strict_route` is enabled, return traffic to excluded interfaces will not be automatically excluded, so add them as well (example: `br-lan` and `pppoe-wan`).
|
||||||
|
|
||||||
Exclude interfaces in route.
|
Exclude interfaces in route.
|
||||||
|
|
||||||
Conflict with `include_interface`.
|
Conflict with `include_interface`.
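
A minimal sketch for the router case described in the warning above; the interface names are examples and may differ on your system:

```json
{
  "type": "tun",
  "tag": "tun-in",
  "address": ["172.18.0.1/30"],
  "auto_route": true,
  "strict_route": true,
  "exclude_interface": [
    "br-lan",
    "pppoe-wan"
  ]
}
```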
|
||||||
|
@ -2,6 +2,25 @@
|
|||||||
icon: material/new-box
|
icon: material/new-box
|
||||||
---
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: [address](#address)
|
||||||
|
:material-delete-clock: [inet4_address](#inet4_address)
|
||||||
|
:material-delete-clock: [inet6_address](#inet6_address)
|
||||||
|
:material-plus: [route_address](#route_address)
|
||||||
|
:material-delete-clock: [inet4_route_address](#inet4_route_address)
|
||||||
|
:material-delete-clock: [inet6_route_address](#inet6_route_address)
|
||||||
|
:material-plus: [route_exclude_address](#route_exclude_address)
|
||||||
|
:material-delete-clock: [inet4_route_exclude_address](#inet4_route_exclude_address)
|
||||||
|
:material-delete-clock: [inet6_route_exclude_address](#inet6_route_exclude_address)
|
||||||
|
:material-plus: [iproute2_table_index](#iproute2_table_index)
|
||||||
|
:material-plus: [iproute2_rule_index](#iproute2_rule_index)
|
||||||
|
:material-plus: [auto_redirect](#auto_redirect)
|
||||||
|
:material-plus: [auto_redirect_input_mark](#auto_redirect_input_mark)
|
||||||
|
:material-plus: [auto_redirect_output_mark](#auto_redirect_output_mark)
|
||||||
|
:material-plus: [route_address_set](#route_address_set)
|
||||||
|
:material-plus: [route_exclude_address_set](#route_exclude_address_set)
|
||||||
|
|
||||||
!!! quote "sing-box 1.9.0 中的更改"
|
!!! quote "sing-box 1.9.0 中的更改"
|
||||||
|
|
||||||
:material-plus: [platform.http_proxy.bypass_domain](#platformhttp_proxybypass_domain)
|
:material-plus: [platform.http_proxy.bypass_domain](#platformhttp_proxybypass_domain)
|
||||||
@ -23,26 +42,61 @@ icon: material/new-box
|
|||||||
"type": "tun",
|
"type": "tun",
|
||||||
"tag": "tun-in",
|
"tag": "tun-in",
|
||||||
"interface_name": "tun0",
|
"interface_name": "tun0",
|
||||||
"inet4_address": "172.19.0.1/30",
|
"address": [
|
||||||
"inet6_address": "fdfe:dcba:9876::1/126",
|
"172.18.0.1/30",
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
|
// 已弃用
|
||||||
|
"inet4_address": [
|
||||||
|
"172.19.0.1/30"
|
||||||
|
],
|
||||||
|
// 已弃用
|
||||||
|
"inet6_address": [
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
"mtu": 9000,
|
"mtu": 9000,
|
||||||
"gso": false,
|
"gso": false,
|
||||||
"auto_route": true,
|
"auto_route": true,
|
||||||
|
"iproute2_table_index": 2022,
|
||||||
|
"iproute2_rule_index": 9000,
|
||||||
|
"auto_redirect": false,
|
||||||
|
"auto_redirect_input_mark": "0x2023",
|
||||||
|
"auto_redirect_output_mark": "0x2024",
|
||||||
"strict_route": true,
|
"strict_route": true,
|
||||||
|
"route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1",
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
// 已弃用
|
||||||
"inet4_route_address": [
|
"inet4_route_address": [
|
||||||
"0.0.0.0/1",
|
"0.0.0.0/1",
|
||||||
"128.0.0.0/1"
|
"128.0.0.0/1"
|
||||||
],
|
],
|
||||||
|
// 已弃用
|
||||||
"inet6_route_address": [
|
"inet6_route_address": [
|
||||||
"::/1",
|
"::/1",
|
||||||
"8000::/1"
|
"8000::/1"
|
||||||
],
|
],
|
||||||
|
"route_exclude_address": [
|
||||||
|
"192.168.0.0/16",
|
||||||
|
"fc00::/7"
|
||||||
|
],
|
||||||
|
// 已弃用
|
||||||
"inet4_route_exclude_address": [
|
"inet4_route_exclude_address": [
|
||||||
"192.168.0.0/16"
|
"192.168.0.0/16"
|
||||||
],
|
],
|
||||||
|
// 已弃用
|
||||||
"inet6_route_exclude_address": [
|
"inet6_route_exclude_address": [
|
||||||
"fc00::/7"
|
"fc00::/7"
|
||||||
],
|
],
|
||||||
|
"route_address_set": [
|
||||||
|
"geoip-cloudflare"
|
||||||
|
],
|
||||||
|
"route_exclude_address_set": [
|
||||||
|
"geoip-cn"
|
||||||
|
],
|
||||||
"endpoint_independent_nat": false,
|
"endpoint_independent_nat": false,
|
||||||
"udp_timeout": "5m",
|
"udp_timeout": "5m",
|
||||||
"stack": "system",
|
"stack": "system",
|
||||||
@ -102,14 +156,30 @@ icon: material/new-box
|
|||||||
|
|
||||||
虚拟设备名称,默认自动选择。
|
虚拟设备名称,默认自动选择。
|
||||||
|
|
||||||
|
#### address
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
==必填==
|
||||||
|
|
||||||
|
tun 接口的 IPv4 和 IPv6 前缀。
|
||||||
|
|
||||||
#### inet4_address
|
#### inet4_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet4_address` 已合并到 `address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
==必填==
|
==必填==
|
||||||
|
|
||||||
tun 接口的 IPv4 前缀。
|
tun 接口的 IPv4 前缀。
|
||||||
|
|
||||||
#### inet6_address
|
#### inet6_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet6_address` 已合并到 `address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
tun 接口的 IPv6 前缀。
|
tun 接口的 IPv6 前缀。
|
||||||
|
|
||||||
#### mtu
|
#### mtu
|
||||||
@ -138,6 +208,56 @@ tun 接口的 IPv6 前缀。
|
|||||||
|
|
||||||
VPN 默认优先于 tun。要使 tun 经过 VPN,启用 `route.override_android_vpn`。
|
VPN 默认优先于 tun。要使 tun 经过 VPN,启用 `route.override_android_vpn`。
|
||||||
|
|
||||||
|
#### iproute2_table_index
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
`auto_route` 生成的 iproute2 路由表索引。
|
||||||
|
|
||||||
|
默认使用 `2022`。
|
||||||
|
|
||||||
|
#### iproute2_rule_index
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
`auto_route` 生成的 iproute2 规则起始索引。
|
||||||
|
|
||||||
|
默认使用 `9000`。
|
||||||
|
|
||||||
|
#### auto_redirect
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
仅支持 Linux,且需要 `auto_route` 已启用。
|
||||||
|
|
||||||
|
自动配置 iptables 以重定向 TCP 连接。
|
||||||
|
|
||||||
|
*在 Android 中*:
|
||||||
|
|
||||||
|
仅转发本地 IPv4 连接。 要通过热点或中继共享您的 VPN 连接,请使用 [VPNHotspot](https://github.com/Mygod/VPNHotspot)。
|
||||||
|
|
||||||
|
*在 Linux 中*:
|
||||||
|
|
||||||
|
带有 `auto_redirect` 的 `auto_route` 现在可以在路由器上按预期工作,**无需干预**。
|
||||||
|
|
||||||
|
#### auto_redirect_input_mark
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
`route_address_set` 和 `route_exclude_address_set` 使用的连接输入标记。
|
||||||
|
|
||||||
|
默认使用 `0x2023`。
|
||||||
|
|
||||||
|
#### auto_redirect_output_mark
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
`route_address_set` 和 `route_exclude_address_set` 使用的连接输出标记。
|
||||||
|
|
||||||
|
默认使用 `0x2024`。
|
||||||
|
|
||||||
#### strict_route
|
#### strict_route
|
||||||
|
|
||||||
启用 `auto_route` 时执行严格的路由规则。
|
启用 `auto_route` 时执行严格的路由规则。
|
||||||
@ -145,9 +265,10 @@ tun 接口的 IPv6 前缀。
|
|||||||
*在 Linux 中*:
|
*在 Linux 中*:
|
||||||
|
|
||||||
* 让不支持的网络无法到达
|
* 让不支持的网络无法到达
|
||||||
|
* 使 ICMP 流量路由到 tun 而不是上游接口
|
||||||
* 将所有连接路由到 tun
|
* 将所有连接路由到 tun
|
||||||
|
|
||||||
它可以防止地址泄漏,并使 DNS 劫持在 Android 上工作。
|
它可以防止 IP 地址泄漏,并使 DNS 劫持在 Android 上工作。
|
||||||
|
|
||||||
*在 Windows 中*:
|
*在 Windows 中*:
|
||||||
|
|
||||||
@ -157,22 +278,76 @@ tun 接口的 IPv6 前缀。
|
|||||||
|
|
||||||
它可能会使某些应用程序(如 VirtualBox)在某些情况下无法正常工作。
|
它可能会使某些应用程序(如 VirtualBox)在某些情况下无法正常工作。
|
||||||
|
|
||||||
|
#### route_address
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
设置到 Tun 的自定义路由。
|
||||||
|
|
||||||
#### inet4_route_address
|
#### inet4_route_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet4_route_address` 已合并到 `route_address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
启用 `auto_route` 时使用自定义路由而不是默认路由。
|
启用 `auto_route` 时使用自定义路由而不是默认路由。
|
||||||
|
|
||||||
#### inet6_route_address
|
#### inet6_route_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet6_route_address` 已合并到 `route_address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
启用 `auto_route` 时使用自定义路由而不是默认路由。
|
启用 `auto_route` 时使用自定义路由而不是默认路由。
|
||||||
|
|
||||||
|
#### route_exclude_address
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
设置到 Tun 的排除自定义路由。
|
||||||
|
|
||||||
#### inet4_route_exclude_address
|
#### inet4_route_exclude_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet4_route_exclude_address` 已合并到 `route_exclude_address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
启用 `auto_route` 时排除自定义路由。
|
启用 `auto_route` 时排除自定义路由。
|
||||||
|
|
||||||
#### inet6_route_exclude_address
|
#### inet6_route_exclude_address
|
||||||
|
|
||||||
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`inet6_route_exclude_address` 已合并到 `route_exclude_address` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
启用 `auto_route` 时排除自定义路由。
|
启用 `auto_route` 时排除自定义路由。
|
||||||
|
|
||||||
|
#### route_address_set
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
仅支持 Linux,且需要 nftables,`auto_route` 和 `auto_redirect` 已启用。
|
||||||
|
|
||||||
|
将指定规则集中的目标 IP CIDR 规则添加到防火墙。
|
||||||
|
不匹配的流量将绕过 sing-box 路由。
|
||||||
|
|
||||||
|
与 `route.default_mark` 和 `[dialOptions].routing_mark` 冲突。
|
||||||
|
|
||||||
|
#### route_exclude_address_set
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
!!! quote ""
|
||||||
|
|
||||||
|
仅支持 Linux,且需要 nftables,`auto_route` 和 `auto_redirect` 已启用。
|
||||||
|
|
||||||
|
将指定规则集中的目标 IP CIDR 规则添加到防火墙。
|
||||||
|
匹配的流量将绕过 sing-box 路由。
|
||||||
|
|
||||||
|
与 `route.default_mark` 和 `[dialOptions].routing_mark` 冲突。
|
||||||
|
|
||||||
#### endpoint_independent_nat
|
#### endpoint_independent_nat
|
||||||
|
|
||||||
启用独立于端点的 NAT。
|
启用独立于端点的 NAT。
|
||||||
@ -211,6 +386,10 @@ TCP/IP 栈。
|
|||||||
|
|
||||||
#### exclude_interface
|
#### exclude_interface
|
||||||
|
|
||||||
|
!!! warning ""
|
||||||
|
|
||||||
|
当 `strict_route` 启用,到被排除接口的回程流量将不会被自动排除,因此也要添加它们(例:`br-lan` 与 `pppoe-wan`)。
|
||||||
|
|
||||||
排除路由的接口。
|
排除路由的接口。
|
||||||
|
|
||||||
与 `include_interface` 冲突。
|
与 `include_interface` 冲突。
|
||||||
@ -284,7 +463,7 @@ TCP/IP 栈。
|
|||||||
|
|
||||||
!!! note ""
|
!!! note ""
|
||||||
|
|
||||||
在 Apple 平台,`bypass_domain` 项匹配主机名 **后缀**.
|
在 Apple 平台,`bypass_domain` 项匹配主机名 **后缀**.
|
||||||
|
|
||||||
绕过代理的主机名列表。
|
绕过代理的主机名列表。
|
||||||
|
|
||||||
|
@ -39,7 +39,7 @@ List of [Route Rule](./rule/)
|
|||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
List of [Rule Set](/configuration/rule-set/)
|
List of [rule-set](/configuration/rule-set/)
|
||||||
|
|
||||||
#### final
|
#### final
|
||||||
|
|
||||||
|
@ -1,3 +1,13 @@
|
|||||||
|
---
|
||||||
|
icon: material/alert-decagram
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: [client](#client)
|
||||||
|
:material-delete-clock: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_match_source](#rule_set_ip_cidr_match_source)
|
||||||
|
|
||||||
!!! quote "Changes in sing-box 1.8.0"
|
!!! quote "Changes in sing-box 1.8.0"
|
||||||
|
|
||||||
:material-plus: [rule_set](#rule_set)
|
:material-plus: [rule_set](#rule_set)
|
||||||
@ -31,6 +41,12 @@
|
|||||||
"http",
|
"http",
|
||||||
"quic"
|
"quic"
|
||||||
],
|
],
|
||||||
|
"client": [
|
||||||
|
"chromium",
|
||||||
|
"safari",
|
||||||
|
"firefox",
|
||||||
|
"quic-go"
|
||||||
|
],
|
||||||
"domain": [
|
"domain": [
|
||||||
"test.com"
|
"test.com"
|
||||||
],
|
],
|
||||||
@ -105,7 +121,9 @@
|
|||||||
"geoip-cn",
|
"geoip-cn",
|
||||||
"geosite-cn"
|
"geosite-cn"
|
||||||
],
|
],
|
||||||
|
// deprecated
|
||||||
"rule_set_ipcidr_match_source": false,
|
"rule_set_ipcidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_match_source": false,
|
||||||
"invert": false,
|
"invert": false,
|
||||||
"outbound": "direct"
|
"outbound": "direct"
|
||||||
},
|
},
|
||||||
@ -137,7 +155,7 @@
|
|||||||
(`source_port` || `source_port_range`) &&
|
(`source_port` || `source_port_range`) &&
|
||||||
`other fields`
|
`other fields`
|
||||||
|
|
||||||
Additionally, included rule sets can be considered merged rather than as a single rule sub-item.
|
Additionally, included rule-sets can be considered merged rather than as a single rule sub-item.
|
||||||
|
|
||||||
#### inbound
|
#### inbound
|
||||||
|
|
||||||
@ -155,7 +173,13 @@ Username, see each inbound for details.
|
|||||||
|
|
||||||
#### protocol
|
#### protocol
|
||||||
|
|
||||||
Sniffed protocol, see [Sniff](/configuration/route/sniff/) for details.
|
Sniffed protocol, see [Protocol Sniff](/configuration/route/sniff/) for details.
|
||||||
|
|
||||||
|
#### client
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Sniffed client type, see [Protocol Sniff](/configuration/route/sniff/) for details.
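
A rough route rule sketch that matches sniffed QUIC traffic from a Chromium-based client; the `direct` outbound tag is an assumption, and sniffing must be enabled on the inbound:

```json
{
  "protocol": "quic",
  "client": "chromium",
  "outbound": "direct"
}
```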
|
||||||
|
|
||||||
#### network
|
#### network
|
||||||
|
|
||||||
@ -297,13 +321,23 @@ Match WiFi BSSID.
|
|||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
Match [Rule Set](/configuration/route/#rule_set).
|
Match [rule-set](/configuration/route/#rule_set).
|
||||||
|
|
||||||
#### rule_set_ipcidr_match_source
|
#### rule_set_ipcidr_match_source
|
||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
Make `ipcidr` in rule sets match the source IP.
|
!!! failure "Deprecated in sing-box 1.10.0"
|
||||||
|
|
||||||
|
`rule_set_ipcidr_match_source` is renamed to `rule_set_ip_cidr_match_source` and will be removed in sing-box 1.11.0.
|
||||||
|
|
||||||
|
Make `ip_cidr` in rule-sets match the source IP.
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_match_source
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
Make `ip_cidr` in rule-sets match the source IP.
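
A small sketch, assuming a `geoip-cn` rule-set and a `direct` outbound are defined elsewhere in the configuration:

```json
{
  "rule_set": ["geoip-cn"],
  "rule_set_ip_cidr_match_source": true,
  "outbound": "direct"
}
```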
|
||||||
|
|
||||||
#### invert
|
#### invert
|
||||||
|
|
||||||
|
@ -1,3 +1,13 @@
|
|||||||
|
---
|
||||||
|
icon: material/alert-decagram
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "sing-box 1.10.0 中的更改"
|
||||||
|
|
||||||
|
:material-plus: [client](#client)
|
||||||
|
:material-delete-clock: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
|
||||||
|
:material-plus: [rule_set_ip_cidr_match_source](#rule_set_ip_cidr_match_source)
|
||||||
|
|
||||||
!!! quote "sing-box 1.8.0 中的更改"
|
!!! quote "sing-box 1.8.0 中的更改"
|
||||||
|
|
||||||
:material-plus: [rule_set](#rule_set)
|
:material-plus: [rule_set](#rule_set)
|
||||||
@ -31,6 +41,12 @@
|
|||||||
"http",
|
"http",
|
||||||
"quic"
|
"quic"
|
||||||
],
|
],
|
||||||
|
"client": [
|
||||||
|
"chromium",
|
||||||
|
"safari",
|
||||||
|
"firefox",
|
||||||
|
"quic-go"
|
||||||
|
],
|
||||||
"domain": [
|
"domain": [
|
||||||
"test.com"
|
"test.com"
|
||||||
],
|
],
|
||||||
@ -103,7 +119,9 @@
|
|||||||
"geoip-cn",
|
"geoip-cn",
|
||||||
"geosite-cn"
|
"geosite-cn"
|
||||||
],
|
],
|
||||||
|
// 已弃用
|
||||||
"rule_set_ipcidr_match_source": false,
|
"rule_set_ipcidr_match_source": false,
|
||||||
|
"rule_set_ip_cidr_match_source": false,
|
||||||
"invert": false,
|
"invert": false,
|
||||||
"outbound": "direct"
|
"outbound": "direct"
|
||||||
},
|
},
|
||||||
@ -155,6 +173,12 @@
|
|||||||
|
|
||||||
探测到的协议, 参阅 [协议探测](/zh/configuration/route/sniff/)。
|
探测到的协议, 参阅 [协议探测](/zh/configuration/route/sniff/)。
|
||||||
|
|
||||||
|
#### client
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
探测到的客户端类型, 参阅 [协议探测](/zh/configuration/route/sniff/)。
|
||||||
|
|
||||||
#### network
|
#### network
|
||||||
|
|
||||||
`tcp` 或 `udp`。
|
`tcp` 或 `udp`。
|
||||||
@ -301,7 +325,17 @@
|
|||||||
|
|
||||||
!!! question "自 sing-box 1.8.0 起"
|
!!! question "自 sing-box 1.8.0 起"
|
||||||
|
|
||||||
使规则集中的 `ipcidr` 规则匹配源 IP。
|
!!! failure "已在 sing-box 1.10.0 废弃"
|
||||||
|
|
||||||
|
`rule_set_ipcidr_match_source` 已重命名为 `rule_set_ip_cidr_match_source` 且将在 sing-box 1.11.0 移除。
|
||||||
|
|
||||||
|
使规则集中的 `ip_cidr` 规则匹配源 IP。
|
||||||
|
|
||||||
|
#### rule_set_ip_cidr_match_source
|
||||||
|
|
||||||
|
!!! question "自 sing-box 1.10.0 起"
|
||||||
|
|
||||||
|
使规则集中的 `ip_cidr` 规则匹配源 IP。
|
||||||
|
|
||||||
#### invert
|
#### invert
|
||||||
|
|
||||||
|
@ -1,11 +1,35 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: Client type detection support for QUIC
|
||||||
|
:material-plus: Chromium support for QUIC
|
||||||
|
:material-plus: BitTorrent support
|
||||||
|
:material-plus: DTLS support
|
||||||
|
:material-plus: SSH support
|
||||||
|
:material-plus: RDP support
|
||||||
|
|
||||||
If enabled in the inbound, the protocol and domain name (if present) of the connection can be sniffed.
|
If enabled in the inbound, the protocol and domain name (if present) of the connection can be sniffed.
|
||||||
|
|
||||||
#### Supported Protocols
|
#### Supported Protocols
|
||||||
|
|
||||||
| Network | Protocol | Domain Name |
|
| Network | Protocol | Domain Name | Client |
|
||||||
|:-------:|:--------:|:-----------:|
|
|:-------:|:------------:|:-----------:|:----------------:|
|
||||||
| TCP | HTTP | Host |
|
| TCP | `http` | Host | / |
|
||||||
| TCP | TLS | Server Name |
|
| TCP | `tls` | Server Name | / |
|
||||||
| UDP | QUIC | Server Name |
|
| UDP | `quic` | Server Name | QUIC Client Type |
|
||||||
| UDP | STUN | / |
|
| UDP | `stun` | / | / |
|
||||||
| TCP/UDP | DNS | / |
|
| TCP/UDP | `dns` | / | / |
|
||||||
|
| TCP/UDP | `bittorrent` | / | / |
|
||||||
|
| UDP | `dtls` | / | / |
|
||||||
|
| TCP | `ssh` | / | SSH Client Name |
|
||||||
|
| TCP | `rdp` | / | / |
|
||||||
|
|
||||||
|
| QUIC Client | Type |
|
||||||
|
|:------------------------:|:----------:|
|
||||||
|
| Chromium/Cronet | `chromium` |
|
||||||
|
| Safari/Apple Network API | `safari` |
|
||||||
|
| Firefox / uquic firefox | `firefox` |
|
||||||
|
| quic-go / uquic chrome | `quic-go` |
|
@ -1,11 +1,35 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "sing-box 1.10.0 中的更改"
|
||||||
|
|
||||||
|
:material-plus: QUIC 的客户端类型探测支持
|
||||||
|
:material-plus: QUIC 的 Chromium 支持
|
||||||
|
:material-plus: BitTorrent 支持
|
||||||
|
:material-plus: DTLS 支持
|
||||||
|
:material-plus: SSH 支持
|
||||||
|
:material-plus: RDP 支持
|
||||||
|
|
||||||
如果在入站中启用,则可以嗅探连接的协议和域名(如果存在)。
|
如果在入站中启用,则可以嗅探连接的协议和域名(如果存在)。
|
||||||
|
|
||||||
#### 支持的协议
|
#### 支持的协议
|
||||||
|
|
||||||
| 网络 | 协议 | 域名 |
|
| 网络 | 协议 | 域名 | 客户端 |
|
||||||
|:-------:|:----:|:-----------:|
|
|:-------:|:------------:|:-----------:|:----------:|
|
||||||
| TCP | HTTP | Host |
|
| TCP | `http` | Host | / |
|
||||||
| TCP | TLS | Server Name |
|
| TCP | `tls` | Server Name | / |
|
||||||
| UDP | QUIC | Server Name |
|
| UDP | `quic` | Server Name | QUIC 客户端类型 |
|
||||||
| UDP | STUN | / |
|
| UDP | `stun` | / | / |
|
||||||
| TCP/UDP | DNS | / |
|
| TCP/UDP | `dns` | / | / |
|
||||||
|
| TCP/UDP | `bittorrent` | / | / |
|
||||||
|
| UDP | `dtls` | / | / |
|
||||||
|
| TCP | `ssh` | / | SSH 客户端名称 |
|
||||||
|
| TCP | `rdp` | / | / |
|
||||||
|
|
||||||
|
| QUIC 客户端 | 类型 |
|
||||||
|
|:------------------------:|:----------:|
|
||||||
|
| Chromium/Cronet | `chromium` |
|
||||||
|
| Safari/Apple Network API | `safari` |
|
||||||
|
| Firefox / uquic firefox | `firefox` |
|
||||||
|
| quic-go / uquic chrome | `quic-go` |
|
71
docs/configuration/rule-set/adguard.md
Normal file
71
docs/configuration/rule-set/adguard.md
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
# AdGuard DNS Filter
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
sing-box supports some rule-set formats from other projects which cannot be fully translated to sing-box,
|
||||||
|
currently only AdGuard DNS Filter.
|
||||||
|
|
||||||
|
These formats are not directly supported as source formats,
|
||||||
|
instead you need to convert them to binary rule-set.
|
||||||
|
|
||||||
|
## Convert
|
||||||
|
|
||||||
|
Use `sing-box rule-set convert --type adguard [--output <file-name>.srs] <file-name>.txt` to convert to binary rule-set.
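
The converted file can then be referenced like any other binary rule-set; the tag and path below are placeholders:

```json
{
  "type": "local",
  "tag": "adguard-dns-filter",
  "format": "binary",
  "path": "adguard.srs"
}
```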
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
|
||||||
|
AdGuard keeps all rules in memory and matches them sequentially,
|
||||||
|
while sing-box chooses high performance and smaller memory usage.
|
||||||
|
As a trade-off, you cannot know which rule item is matched.
|
||||||
|
|
||||||
|
## Compatibility
|
||||||
|
|
||||||
|
Almost all rules in [AdGuardSDNSFilter](https://github.com/AdguardTeam/AdGuardSDNSFilter)
|
||||||
|
and rules in rule-sets listed in [adguard-filter-list](https://github.com/ppfeufer/adguard-filter-list)
|
||||||
|
are supported.
|
||||||
|
|
||||||
|
## Supported formats
|
||||||
|
|
||||||
|
### AdGuard Filter
|
||||||
|
|
||||||
|
#### Basic rule syntax
|
||||||
|
|
||||||
|
| Syntax | Supported |
|
||||||
|
|--------|------------------|
|
||||||
|
| `@@` | :material-check: |
|
||||||
|
| `\|\|` | :material-check: |
|
||||||
|
| `\|` | :material-check: |
|
||||||
|
| `^` | :material-check: |
|
||||||
|
| `*` | :material-check: |
|
||||||
|
|
||||||
|
#### Host syntax
|
||||||
|
|
||||||
|
| Syntax | Example | Supported |
|
||||||
|
|-------------|--------------------------|--------------------------|
|
||||||
|
| Scheme | `https://` | :material-alert: Ignored |
|
||||||
|
| Domain Host | `example.org` | :material-check: |
|
||||||
|
| IP Host | `1.1.1.1`, `10.0.0.` | :material-close: |
|
||||||
|
| Regexp | `/regexp/` | :material-check: |
|
||||||
|
| Port | `example.org:80` | :material-close: |
|
||||||
|
| Path | `example.org/path/ad.js` | :material-close: |
|
||||||
|
|
||||||
|
#### Modifier syntax
|
||||||
|
|
||||||
|
| Modifier | Supported |
|
||||||
|
|-----------------------|--------------------------|
|
||||||
|
| `$important` | :material-check: |
|
||||||
|
| `$dnsrewrite=0.0.0.0` | :material-alert: Ignored |
|
||||||
|
| Any other modifiers | :material-close: |
|
||||||
|
|
||||||
|
### Hosts
|
||||||
|
|
||||||
|
Only items with `0.0.0.0` IP addresses will be accepted.
|
||||||
|
|
||||||
|
### Simple
|
||||||
|
|
||||||
|
When all rule lines are valid domains, they are treated as simple line-by-line domain rules which,
|
||||||
|
like hosts, only match the exact same domain.
|
@ -1,48 +1,56 @@
|
|||||||
# Rule Set
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: `type: inline`
|
||||||
|
|
||||||
|
# rule-set
|
||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
### Structure
|
### Structure
|
||||||
|
|
||||||
```json
|
=== "Inline"
|
||||||
{
|
|
||||||
"type": "",
|
|
||||||
"tag": "",
|
|
||||||
"format": "",
|
|
||||||
|
|
||||||
... // Typed Fields
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Local Structure
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"type": "local",
|
"type": "inline", // optional
|
||||||
|
"tag": "",
|
||||||
...
|
"rules": []
|
||||||
|
}
|
||||||
"path": ""
|
```
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Remote Structure
|
=== "Local File"
|
||||||
|
|
||||||
!!! info ""
|
```json
|
||||||
|
{
|
||||||
|
"type": "local",
|
||||||
|
"tag": "",
|
||||||
|
"format": "source", // or binary
|
||||||
|
"path": ""
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
Remote rule-set will be cached if `experimental.cache_file.enabled`.
|
=== "Remote File"
|
||||||
|
|
||||||
```json
|
!!! info ""
|
||||||
{
|
|
||||||
"type": "remote",
|
Remote rule-set will be cached if `experimental.cache_file.enabled`.
|
||||||
|
|
||||||
...,
|
```json
|
||||||
|
{
|
||||||
"url": "",
|
"type": "remote",
|
||||||
"download_detour": "",
|
"tag": "",
|
||||||
"update_interval": ""
|
"format": "source", // or binary
|
||||||
}
|
"url": "",
|
||||||
```
|
"download_detour": "", // optional
|
||||||
|
"update_interval": "" // optional
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### Fields
|
### Fields
|
||||||
|
|
||||||
@ -50,19 +58,31 @@
|
|||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
Type of Rule Set, `local` or `remote`.
|
Type of rule-set, `local` or `remote`.
|
||||||
|
|
||||||
#### tag
|
#### tag
|
||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
Tag of Rule Set.
|
Tag of rule-set.
|
||||||
|
|
||||||
|
### Inline Fields
|
||||||
|
|
||||||
|
!!! question "Since sing-box 1.10.0"
|
||||||
|
|
||||||
|
#### rules
|
||||||
|
|
||||||
|
==Required==
|
||||||
|
|
||||||
|
List of [Headless Rule](./headless-rule.md/).
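
A minimal inline rule-set sketch with a single headless rule; the tag and domain suffix are placeholders:

```json
{
  "type": "inline",
  "tag": "inline-example",
  "rules": [
    {
      "domain_suffix": ["example.org"]
    }
  ]
}
```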
|
||||||
|
|
||||||
|
### Local or Remote Fields
|
||||||
|
|
||||||
#### format
|
#### format
|
||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
Format of Rule Set, `source` or `binary`.
|
Format of rule-set file, `source` or `binary`.
|
||||||
|
|
||||||
### Local Fields
|
### Local Fields
|
||||||
|
|
||||||
@ -70,7 +90,11 @@ Format of Rule Set, `source` or `binary`.
|
|||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
File path of Rule Set.
|
!!! note ""
|
||||||
|
|
||||||
|
Since sing-box 1.10.0, the file will be automatically reloaded if modified.
|
||||||
|
|
||||||
|
File path of rule-set.
|
||||||
|
|
||||||
### Remote Fields
|
### Remote Fields
|
||||||
|
|
||||||
@ -78,7 +102,7 @@ File path of Rule Set.
|
|||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
Download URL of Rule Set.
|
Download URL of rule-set.
|
||||||
|
|
||||||
#### download_detour
|
#### download_detour
|
||||||
|
|
||||||
@ -88,6 +112,6 @@ Default outbound will be used if empty.
|
|||||||
|
|
||||||
#### update_interval
|
#### update_interval
|
||||||
|
|
||||||
Update interval of Rule Set.
|
Update interval of rule-set.
|
||||||
|
|
||||||
`1d` will be used if empty.
|
`1d` will be used if empty.
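
Putting the remote fields together, a hedged sketch of a remote rule-set refreshed daily; the tag and URL are placeholders, and `download_detour` is omitted so the default outbound is used:

```json
{
  "type": "remote",
  "tag": "geoip-example",
  "format": "binary",
  "url": "https://example.org/geoip-example.srs",
  "update_interval": "1d"
}
```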
|
||||||
|
@ -1,12 +1,20 @@
|
|||||||
|
---
|
||||||
|
icon: material/new-box
|
||||||
|
---
|
||||||
|
|
||||||
# Source Format
|
# Source Format
|
||||||
|
|
||||||
|
!!! quote "Changes in sing-box 1.10.0"
|
||||||
|
|
||||||
|
:material-plus: version `2`
|
||||||
|
|
||||||
!!! question "Since sing-box 1.8.0"
|
!!! question "Since sing-box 1.8.0"
|
||||||
|
|
||||||
### Structure
|
### Structure
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"version": 1,
|
"version": 2,
|
||||||
"rules": []
|
"rules": []
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@ -21,7 +29,16 @@ Use `sing-box rule-set compile [--output <file-name>.srs] <file-name>.json` to c
|
|||||||
|
|
||||||
==Required==
|
==Required==
|
||||||
|
|
||||||
Version of Rule Set, must be `1`.
|
Version of rule-set, one of `1` or `2`.
|
||||||
|
|
||||||
|
* 1: Initial rule-set version, since sing-box 1.8.0.
|
||||||
|
* 2: Optimized memory usages of `domain_suffix` rules.
|
||||||
|
|
||||||
|
The new rule-set version `2` does not make any changes to the format; it only affects `binary` rule-sets compiled by the `rule-set compile` command.
|
||||||
|
|
||||||
|
Since 1.10.0, the optimization is always applied to `source` rule-sets even if version is set to `1`.
|
||||||
|
|
||||||
|
It is recommended to upgrade to `2` after sing-box 1.10.0 becomes a stable version.
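
A compilable source rule-set sketch using version `2`; the single `domain_suffix` rule is a placeholder. It can be compiled with the `sing-box rule-set compile` command mentioned above.

```json
{
  "version": 2,
  "rules": [
    {
      "domain_suffix": ["example.org"]
    }
  ]
}
```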
|
||||||
|
|
||||||
#### rules
|
#### rules
|
||||||
|
|
||||||
|
@ -178,6 +178,10 @@ The server certificate line array, in PEM format.
|
|||||||
|
|
||||||
#### certificate_path
|
#### certificate_path
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
Will be automatically reloaded if the file is modified.
|
||||||
|
|
||||||
The path to the server certificate, in PEM format.
|
The path to the server certificate, in PEM format.
|
||||||
|
|
||||||
#### key
|
#### key
|
||||||
@ -190,6 +194,10 @@ The server private key line array, in PEM format.
|
|||||||
|
|
||||||
==Server only==
|
==Server only==
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
Will be automatically reloaded if the file is modified.
|
||||||
|
|
||||||
The path to the server private key, in PEM format.
|
The path to the server private key, in PEM format.
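
A minimal server-side TLS sketch using file paths, which benefit from the automatic reload noted above; the paths are placeholders:

```json
{
  "enabled": true,
  "certificate_path": "/etc/sing-box/fullchain.pem",
  "key_path": "/etc/sing-box/key.pem"
}
```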
|
||||||
|
|
||||||
## Custom TLS support
|
## Custom TLS support
|
||||||
@ -266,6 +274,10 @@ ECH key line array, in PEM format.
|
|||||||
|
|
||||||
==Server only==
|
==Server only==
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
Will be automatically reloaded if the file is modified.
|
||||||
|
|
||||||
The path to ECH key, in PEM format.
|
The path to ECH key, in PEM format.
|
||||||
|
|
||||||
#### config
|
#### config
|
||||||
@ -397,8 +409,4 @@ A hexadecimal string with zero to eight digits.
|
|||||||
|
|
||||||
The maximum time difference between the server and the client.
|
The maximum time difference between the server and the client.
|
||||||
|
|
||||||
Check disabled if empty.
|
Check disabled if empty.
|
||||||
|
|
||||||
### Reload
|
|
||||||
|
|
||||||
For server configuration, certificate, key and ECH key will be automatically reloaded if modified.
|
|
@ -176,12 +176,20 @@ TLS 版本值:
|
|||||||
|
|
||||||
#### certificate_path
|
#### certificate_path
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
文件更改时将自动重新加载。
|
||||||
|
|
||||||
服务器 PEM 证书路径。
|
服务器 PEM 证书路径。
|
||||||
|
|
||||||
#### key
|
#### key
|
||||||
|
|
||||||
==仅服务器==
|
==仅服务器==
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
文件更改时将自动重新加载。
|
||||||
|
|
||||||
服务器 PEM 私钥行数组。
|
服务器 PEM 私钥行数组。
|
||||||
|
|
||||||
#### key_path
|
#### key_path
|
||||||
@ -258,6 +266,10 @@ ECH PEM 密钥行数组
|
|||||||
|
|
||||||
==仅服务器==
|
==仅服务器==
|
||||||
|
|
||||||
|
!!! note ""
|
||||||
|
|
||||||
|
文件更改时将自动重新加载。
|
||||||
|
|
||||||
ECH PEM 密钥路径
|
ECH PEM 密钥路径
|
||||||
|
|
||||||
#### config
|
#### config
|
||||||
@ -384,7 +396,3 @@ ACME DNS01 验证字段。如果配置,将禁用其他验证方法。
|
|||||||
服务器与客户端之间允许的最大时间差。
|
服务器与客户端之间允许的最大时间差。
|
||||||
|
|
||||||
默认禁用检查。
|
默认禁用检查。
|
||||||
|
|
||||||
### 重载
|
|
||||||
|
|
||||||
对于服务器配置,如果修改,证书和密钥将自动重新加载。
|
|
@ -4,6 +4,20 @@ icon: material/delete-alert
|
|||||||
|
|
||||||
# Deprecated Feature List
|
# Deprecated Feature List
|
||||||
|
|
||||||
|
## 1.10.0
|
||||||
|
|
||||||
|
#### TUN address fields are merged
|
||||||
|
|
||||||
|
`inet4_address` and `inet6_address` are merged into `address`,
|
||||||
|
`inet4_route_address` and `inet6_route_address` are merged into `route_address`,
|
||||||
|
`inet4_route_exclude_address` and `inet6_route_exclude_address` are merged into `route_exclude_address`.
|
||||||
|
|
||||||
|
Old fields are deprecated and will be removed in sing-box 1.11.0.
|
||||||
|
|
||||||
|
#### Drop support for go1.18 and go1.19
|
||||||
|
|
||||||
|
Due to maintenance difficulties, sing-box 1.10.0 requires at least Go 1.20 to compile.
|
||||||
|
|
||||||
## 1.8.0
|
## 1.8.0
|
||||||
|
|
||||||
#### Cache file and related features in Clash API
|
#### Cache file and related features in Clash API
|
||||||
@ -19,7 +33,7 @@ The maxmind GeoIP National Database, as an IP classification database,
|
|||||||
is not entirely suitable for traffic bypassing,
|
is not entirely suitable for traffic bypassing,
|
||||||
and all existing implementations suffer from high memory usage and difficult management.
|
and all existing implementations suffer from high memory usage and difficult management.
|
||||||
|
|
||||||
sing-box 1.8.0 introduces [Rule Set](/configuration/rule-set/), which can completely replace GeoIP,
|
sing-box 1.8.0 introduces [rule-set](/configuration/rule-set/), which can completely replace GeoIP,
|
||||||
check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||||
|
|
||||||
#### Geosite
|
#### Geosite
|
||||||
@ -29,7 +43,7 @@ Geosite is deprecated and may be removed in the future.
|
|||||||
Geosite, the `domain-list-community` project maintained by V2Ray as an early traffic bypassing solution,
|
Geosite, the `domain-list-community` project maintained by V2Ray as an early traffic bypassing solution,
|
||||||
suffers from a number of problems, including lack of maintenance, inaccurate rules, and difficult management.
|
suffers from a number of problems, including lack of maintenance, inaccurate rules, and difficult management.
|
||||||
|
|
||||||
sing-box 1.8.0 introduces [Rule Set](/configuration/rule-set/), which can completely replace Geosite,
|
sing-box 1.8.0 introduces [rule-set](/configuration/rule-set/), which can completely replace Geosite,
|
||||||
check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
||||||
|
|
||||||
## 1.6.0
|
## 1.6.0
|
||||||
|
@ -4,6 +4,20 @@ icon: material/delete-alert
|
|||||||
|
|
||||||
# 废弃功能列表
|
# 废弃功能列表
|
||||||
|
|
||||||
|
## 1.10.0
|
||||||
|
|
||||||
|
#### TUN 地址字段已合并
|
||||||
|
|
||||||
|
`inet4_address` 和 `inet6_address` 已合并为 `address`,
|
||||||
|
`inet4_route_address` 和 `inet6_route_address` 已合并为 `route_address`,
|
||||||
|
`inet4_route_exclude_address` 和 `inet6_route_exclude_address` 已合并为 `route_exclude_address`。
|
||||||
|
|
||||||
|
旧字段已废弃,且将在 sing-box 1.11.0 中移除。
|
||||||
|
|
||||||
|
#### 移除对 go1.18 和 go1.19 的支持
|
||||||
|
|
||||||
|
由于维护困难,sing-box 1.10.0 要求至少 Go 1.20 才能编译。
|
||||||
|
|
||||||
## 1.8.0
|
## 1.8.0
|
||||||
|
|
||||||
#### Clash API 中的 Cache file 及相关功能
|
#### Clash API 中的 Cache file 及相关功能
|
||||||
|
@ -2,6 +2,74 @@
|
|||||||
icon: material/arrange-bring-forward
|
icon: material/arrange-bring-forward
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## 1.10.0
|
||||||
|
|
||||||
|
### TUN address fields are merged
|
||||||
|
|
||||||
|
`inet4_address` and `inet6_address` are merged into `address`,
|
||||||
|
`inet4_route_address` and `inet6_route_address` are merged into `route_address`,
|
||||||
|
`inet4_route_exclude_address` and `inet6_route_exclude_address` are merged into `route_exclude_address`.
|
||||||
|
|
||||||
|
Old fields are deprecated and will be removed in sing-box 1.11.0.
|
||||||
|
|
||||||
|
!!! info "References"
|
||||||
|
|
||||||
|
[TUN](/configuration/inbound/tun/)
|
||||||
|
|
||||||
|
=== ":material-card-remove: Deprecated"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"inbounds": [
|
||||||
|
{
|
||||||
|
"type": "tun",
|
||||||
|
"inet4_address": "172.19.0.1/30",
|
||||||
|
"inet6_address": "fdfe:dcba:9876::1/126",
|
||||||
|
"inet4_route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1"
|
||||||
|
],
|
||||||
|
"inet6_route_address": [
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
"inet4_route_exclude_address": [
|
||||||
|
"192.168.0.0/16"
|
||||||
|
],
|
||||||
|
"inet6_route_exclude_address": [
|
||||||
|
"fc00::/7"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== ":material-card-multiple: New"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"inbounds": [
|
||||||
|
{
|
||||||
|
"type": "tun",
|
||||||
|
"address": [
|
||||||
|
"172.19.0.1/30",
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
|
"route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1",
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
"route_exclude_address": [
|
||||||
|
"192.168.0.0/16",
|
||||||
|
"fc00::/7"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## 1.9.0
|
## 1.9.0
|
||||||
|
|
||||||
### `domain_suffix` behavior update
|
### `domain_suffix` behavior update
|
||||||
@ -60,7 +128,7 @@ which will disrupt the existing `process_path` use cases in Windows.
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### :material-checkbox-intermediate: Migrate GeoIP to rule sets
|
### :material-checkbox-intermediate: Migrate GeoIP to rule-sets
|
||||||
|
|
||||||
!!! info "References"
|
!!! info "References"
|
||||||
|
|
||||||
@ -68,11 +136,11 @@ which will disrupt the existing `process_path` use cases in Windows.
|
|||||||
[Route](/configuration/route/) /
|
[Route](/configuration/route/) /
|
||||||
[Route Rule](/configuration/route/rule/) /
|
[Route Rule](/configuration/route/rule/) /
|
||||||
[DNS Rule](/configuration/dns/rule/) /
|
[DNS Rule](/configuration/dns/rule/) /
|
||||||
[Rule Set](/configuration/rule-set/)
|
[rule-set](/configuration/rule-set/)
|
||||||
|
|
||||||
!!! tip
|
!!! tip
|
||||||
|
|
||||||
`sing-box geoip` commands can help you convert custom GeoIP into rule sets.
|
`sing-box geoip` commands can help you convert custom GeoIP into rule-sets.
|
||||||
|
|
||||||
=== ":material-card-remove: Deprecated"
|
=== ":material-card-remove: Deprecated"
|
||||||
|
|
||||||
@ -139,13 +207,13 @@ which will disrupt the existing `process_path` use cases in Windows.
|
|||||||
},
|
},
|
||||||
"experimental": {
|
"experimental": {
|
||||||
"cache_file": {
|
"cache_file": {
|
||||||
"enabled": true // required to save Rule Set cache
|
"enabled": true // required to save rule-set cache
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
### :material-checkbox-intermediate: Migrate Geosite to rule sets
|
### :material-checkbox-intermediate: Migrate Geosite to rule-sets
|
||||||
|
|
||||||
!!! info "References"
|
!!! info "References"
|
||||||
|
|
||||||
@ -153,11 +221,11 @@ which will disrupt the existing `process_path` use cases in Windows.
|
|||||||
[Route](/configuration/route/) /
|
[Route](/configuration/route/) /
|
||||||
[Route Rule](/configuration/route/rule/) /
|
[Route Rule](/configuration/route/rule/) /
|
||||||
[DNS Rule](/configuration/dns/rule/) /
|
[DNS Rule](/configuration/dns/rule/) /
|
||||||
[Rule Set](/configuration/rule-set/)
|
[rule-set](/configuration/rule-set/)
|
||||||
|
|
||||||
!!! tip
|
!!! tip
|
||||||
|
|
||||||
`sing-box geosite` commands can help you convert custom Geosite into rule sets.
|
`sing-box geosite` commands can help you convert custom Geosite into rule-sets.
|
||||||
|
|
||||||
=== ":material-card-remove: Deprecated"
|
=== ":material-card-remove: Deprecated"
|
||||||
|
|
||||||
@ -200,7 +268,7 @@ which will disrupt the existing `process_path` use cases in Windows.
|
|||||||
},
|
},
|
||||||
"experimental": {
|
"experimental": {
|
||||||
"cache_file": {
|
"cache_file": {
|
||||||
"enabled": true // required to save Rule Set cache
|
"enabled": true // required to save rule-set cache
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,74 @@
|
|||||||
icon: material/arrange-bring-forward
|
icon: material/arrange-bring-forward
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## 1.10.0
|
||||||
|
|
||||||
|
### TUN 地址字段已合并
|
||||||
|
|
||||||
|
`inet4_address` 和 `inet6_address` 已合并为 `address`,
|
||||||
|
`inet4_route_address` 和 `inet6_route_address` 已合并为 `route_address`,
|
||||||
|
`inet4_route_exclude_address` 和 `inet6_route_exclude_address` 已合并为 `route_exclude_address`。
|
||||||
|
|
||||||
|
旧字段已废弃,且将在 sing-box 1.11.0 中移除。
|
||||||
|
|
||||||
|
!!! info "参考"
|
||||||
|
|
||||||
|
[TUN](/zh/configuration/inbound/tun/)
|
||||||
|
|
||||||
|
=== ":material-card-remove: 弃用的"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"inbounds": [
|
||||||
|
{
|
||||||
|
"type": "tun",
|
||||||
|
"inet4_address": "172.19.0.1/30",
|
||||||
|
"inet6_address": "fdfe:dcba:9876::1/126",
|
||||||
|
"inet4_route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1"
|
||||||
|
],
|
||||||
|
"inet6_route_address": [
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
"inet4_route_exclude_address": [
|
||||||
|
"192.168.0.0/16"
|
||||||
|
],
|
||||||
|
"inet6_route_exclude_address": [
|
||||||
|
"fc00::/7"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
=== ":material-card-multiple: 新的"
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"inbounds": [
|
||||||
|
{
|
||||||
|
"type": "tun",
|
||||||
|
"address": [
|
||||||
|
"172.19.0.1/30",
|
||||||
|
"fdfe:dcba:9876::1/126"
|
||||||
|
],
|
||||||
|
"route_address": [
|
||||||
|
"0.0.0.0/1",
|
||||||
|
"128.0.0.0/1",
|
||||||
|
"::/1",
|
||||||
|
"8000::/1"
|
||||||
|
],
|
||||||
|
"route_exclude_address": [
|
||||||
|
"192.168.0.0/16",
|
||||||
|
"fc00::/7"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## 1.9.0
|
## 1.9.0
|
||||||
|
|
||||||
### `domain_suffix` 行为更新
|
### `domain_suffix` 行为更新
|
||||||
@ -138,7 +206,7 @@ sing-box 1.9.0 使 QueryFullProcessImageNameW 输出 Win32 路径(如 `C:\fold
|
|||||||
},
|
},
|
||||||
"experimental": {
|
"experimental": {
|
||||||
"cache_file": {
|
"cache_file": {
|
||||||
"enabled": true // required to save Rule Set cache
|
"enabled": true // required to save rule-set cache
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -199,7 +267,7 @@ sing-box 1.9.0 使 QueryFullProcessImageNameW 输出 Win32 路径(如 `C:\fold
|
|||||||
},
|
},
|
||||||
"experimental": {
|
"experimental": {
|
||||||
"cache_file": {
|
"cache_file": {
|
||||||
"enabled": true // required to save Rule Set cache
|
"enabled": true // required to save rule-set cache
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -14,6 +14,7 @@ import (
|
|||||||
|
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/go-chi/render"
|
"github.com/go-chi/render"
|
||||||
|
"github.com/gofrs/uuid/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
func connectionRouter(router adapter.Router, trafficManager *trafficontrol.Manager) http.Handler {
|
func connectionRouter(router adapter.Router, trafficManager *trafficontrol.Manager) http.Handler {
|
||||||
@ -76,10 +77,10 @@ func getConnections(trafficManager *trafficontrol.Manager) func(w http.ResponseW
|
|||||||
|
|
||||||
func closeConnection(trafficManager *trafficontrol.Manager) func(w http.ResponseWriter, r *http.Request) {
|
func closeConnection(trafficManager *trafficontrol.Manager) func(w http.ResponseWriter, r *http.Request) {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
id := chi.URLParam(r, "id")
|
id := uuid.FromStringOrNil(chi.URLParam(r, "id"))
|
||||||
snapshot := trafficManager.Snapshot()
|
snapshot := trafficManager.Snapshot()
|
||||||
for _, c := range snapshot.Connections {
|
for _, c := range snapshot.Connections {
|
||||||
if id == c.ID() {
|
if id == c.Metadata().ID {
|
||||||
c.Close()
|
c.Close()
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -7,9 +7,12 @@ import (
 	"net"
 	"net/http"
 	"os"
+	"runtime"
 	"strings"
+	"syscall"
 	"time"
 
+	"github.com/sagernet/cors"
 	"github.com/sagernet/sing-box/adapter"
 	"github.com/sagernet/sing-box/common/urltest"
 	C "github.com/sagernet/sing-box/constant"
@@ -19,7 +22,6 @@ import (
 	"github.com/sagernet/sing-box/option"
 	"github.com/sagernet/sing/common"
 	E "github.com/sagernet/sing/common/exceptions"
-	F "github.com/sagernet/sing/common/format"
 	"github.com/sagernet/sing/common/json"
 	N "github.com/sagernet/sing/common/network"
 	"github.com/sagernet/sing/service"
@@ -28,7 +30,6 @@ import (
 	"github.com/sagernet/ws/wsutil"
 
 	"github.com/go-chi/chi/v5"
-	"github.com/go-chi/cors"
 	"github.com/go-chi/render"
 )
 
@@ -89,11 +90,16 @@ func NewServer(ctx context.Context, router adapter.Router, logFactory log.Observ
 	if options.StoreMode || options.StoreSelected || options.StoreFakeIP || options.CacheFile != "" || options.CacheID != "" {
 		return nil, E.New("cache_file and related fields in Clash API is deprecated in sing-box 1.8.0, use experimental.cache_file instead.")
 	}
+	allowedOrigins := options.AccessControlAllowOrigin
+	if len(allowedOrigins) == 0 {
+		allowedOrigins = []string{"*"}
+	}
 	cors := cors.New(cors.Options{
-		AllowedOrigins: []string{"*"},
+		AllowedOrigins:      allowedOrigins,
 		AllowedMethods: []string{"GET", "POST", "PUT", "PATCH", "DELETE"},
 		AllowedHeaders: []string{"Content-Type", "Authorization"},
-		MaxAge:         300,
+		AllowPrivateNetwork: options.AccessControlAllowPrivateNetwork,
+		MaxAge:              300,
 	})
 	chiRouter.Use(cors.Handler)
 	chiRouter.Group(func(r chi.Router) {
@@ -144,7 +150,18 @@ func (s *Server) PreStart() error {
 func (s *Server) Start() error {
 	if s.externalController {
 		s.checkAndDownloadExternalUI()
-		listener, err := net.Listen("tcp", s.httpServer.Addr)
+		var (
+			listener net.Listener
+			err      error
+		)
+		for i := 0; i < 3; i++ {
+			listener, err = net.Listen("tcp", s.httpServer.Addr)
+			if runtime.GOOS == "android" && errors.Is(err, syscall.EADDRINUSE) {
+				time.Sleep(100 * time.Millisecond)
+				continue
+			}
+			break
+		}
 		if err != nil {
 			return E.Cause(err, "external controller listen error")
 		}
@@ -218,58 +235,15 @@ func (s *Server) TrafficManager() *trafficontrol.Manager {
 }
 
 func (s *Server) RoutedConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, matchedRule adapter.Rule) (net.Conn, adapter.Tracker) {
-	tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
+	tracker := trafficontrol.NewTCPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
 	return tracker, tracker
 }
 
 func (s *Server) RoutedPacketConnection(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext, matchedRule adapter.Rule) (N.PacketConn, adapter.Tracker) {
-	tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, castMetadata(metadata), s.router, matchedRule)
+	tracker := trafficontrol.NewUDPTracker(conn, s.trafficManager, metadata, s.router, matchedRule)
 	return tracker, tracker
 }
 
-func castMetadata(metadata adapter.InboundContext) trafficontrol.Metadata {
-	var inbound string
-	if metadata.Inbound != "" {
-		inbound = metadata.InboundType + "/" + metadata.Inbound
-	} else {
-		inbound = metadata.InboundType
-	}
-	var domain string
-	if metadata.Domain != "" {
-		domain = metadata.Domain
-	} else {
-		domain = metadata.Destination.Fqdn
-	}
-	var processPath string
-	if metadata.ProcessInfo != nil {
-		if metadata.ProcessInfo.ProcessPath != "" {
-			processPath = metadata.ProcessInfo.ProcessPath
-		} else if metadata.ProcessInfo.PackageName != "" {
-			processPath = metadata.ProcessInfo.PackageName
-		}
-		if processPath == "" {
-			if metadata.ProcessInfo.UserId != -1 {
-				processPath = F.ToString(metadata.ProcessInfo.UserId)
-			}
-		} else if metadata.ProcessInfo.User != "" {
-			processPath = F.ToString(processPath, " (", metadata.ProcessInfo.User, ")")
-		} else if metadata.ProcessInfo.UserId != -1 {
-			processPath = F.ToString(processPath, " (", metadata.ProcessInfo.UserId, ")")
-		}
-	}
-	return trafficontrol.Metadata{
-		NetWork:     metadata.Network,
-		Type:        inbound,
-		SrcIP:       metadata.Source.Addr,
-		DstIP:       metadata.Destination.Addr,
-		SrcPort:     F.ToString(metadata.Source.Port),
-		DstPort:     F.ToString(metadata.Destination.Port),
-		Host:        domain,
-		DNSMode:     "normal",
-		ProcessPath: processPath,
-	}
-}
-
 func authentication(serverSecret string) func(next http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
 		fn := func(w http.ResponseWriter, r *http.Request) {
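
For reference, the retry that `Start()` now performs can be read in isolation as the sketch below: on Android the previous listener may linger briefly after a service restart, so `EADDRINUSE` is retried a few times before giving up. The helper name and the standalone packaging are illustrative; the constants (3 attempts, 100 ms) come from the hunk above.

```go
package main

import (
	"errors"
	"net"
	"runtime"
	"syscall"
	"time"
)

// listenWithRetry restates the new listen loop: retry only on Android and
// only for "address already in use", otherwise return immediately.
func listenWithRetry(addr string) (net.Listener, error) {
	var (
		listener net.Listener
		err      error
	)
	for i := 0; i < 3; i++ {
		listener, err = net.Listen("tcp", addr)
		if runtime.GOOS == "android" && errors.Is(err, syscall.EADDRINUSE) {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		break
	}
	return listener, err
}

func main() {
	listener, err := listenWithRetry("127.0.0.1:9090") // placeholder address
	if err != nil {
		panic(err)
	}
	defer listener.Close()
}
```
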
@@ -2,10 +2,17 @@ package trafficontrol
 
 import (
 	"runtime"
+	"sync"
 	"time"
 
+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing-box/experimental/clashapi/compatible"
+	"github.com/sagernet/sing/common"
 	"github.com/sagernet/sing/common/atomic"
+	"github.com/sagernet/sing/common/json"
+	"github.com/sagernet/sing/common/x/list"
+
+	"github.com/gofrs/uuid/v5"
 )
 
 type Manager struct {
@@ -16,9 +23,11 @@ type Manager struct {
 	uploadTotal   atomic.Int64
 	downloadTotal atomic.Int64
 
-	connections compatible.Map[string, tracker]
-	ticker      *time.Ticker
-	done        chan struct{}
+	connections             compatible.Map[uuid.UUID, Tracker]
+	closedConnectionsAccess sync.Mutex
+	closedConnections       list.List[TrackerMetadata]
+	ticker                  *time.Ticker
+	done                    chan struct{}
 	// process *process.Process
 	memory uint64
 }
@@ -33,12 +42,22 @@ func NewManager() *Manager {
 	return manager
 }
 
-func (m *Manager) Join(c tracker) {
-	m.connections.Store(c.ID(), c)
+func (m *Manager) Join(c Tracker) {
+	m.connections.Store(c.Metadata().ID, c)
 }
 
-func (m *Manager) Leave(c tracker) {
-	m.connections.Delete(c.ID())
+func (m *Manager) Leave(c Tracker) {
+	metadata := c.Metadata()
+	_, loaded := m.connections.LoadAndDelete(metadata.ID)
+	if loaded {
+		metadata.ClosedAt = time.Now()
+		m.closedConnectionsAccess.Lock()
+		defer m.closedConnectionsAccess.Unlock()
+		if m.closedConnections.Len() >= 1000 {
+			m.closedConnections.PopFront()
+		}
+		m.closedConnections.PushBack(metadata)
+	}
 }
 
 func (m *Manager) PushUploaded(size int64) {
@@ -59,14 +78,39 @@ func (m *Manager) Total() (up int64, down int64) {
 	return m.uploadTotal.Load(), m.downloadTotal.Load()
 }
 
-func (m *Manager) Connections() int {
+func (m *Manager) ConnectionsLen() int {
 	return m.connections.Len()
 }
 
+func (m *Manager) Connections() []TrackerMetadata {
+	var connections []TrackerMetadata
+	m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
+		connections = append(connections, value.Metadata())
+		return true
+	})
+	return connections
+}
+
+func (m *Manager) ClosedConnections() []TrackerMetadata {
+	m.closedConnectionsAccess.Lock()
+	defer m.closedConnectionsAccess.Unlock()
+	return m.closedConnections.Array()
+}
+
+func (m *Manager) Connection(id uuid.UUID) Tracker {
+	connection, loaded := m.connections.Load(id)
+	if !loaded {
+		return nil
+	}
+	return connection
+}
+
 func (m *Manager) Snapshot() *Snapshot {
-	var connections []tracker
-	m.connections.Range(func(_ string, value tracker) bool {
-		connections = append(connections, value)
+	var connections []Tracker
+	m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
+		if value.Metadata().OutboundType != C.TypeDNS {
+			connections = append(connections, value)
+		}
 		return true
 	})
 
@@ -75,10 +119,10 @@ func (m *Manager) Snapshot() *Snapshot {
 	m.memory = memStats.StackInuse + memStats.HeapInuse + memStats.HeapIdle - memStats.HeapReleased
 
 	return &Snapshot{
-		UploadTotal:   m.uploadTotal.Load(),
-		DownloadTotal: m.downloadTotal.Load(),
+		Upload:      m.uploadTotal.Load(),
+		Download:    m.downloadTotal.Load(),
 		Connections: connections,
 		Memory:      m.memory,
 	}
 }
 
@@ -114,8 +158,17 @@ func (m *Manager) Close() error {
 }
 
 type Snapshot struct {
-	DownloadTotal int64     `json:"downloadTotal"`
-	UploadTotal   int64     `json:"uploadTotal"`
-	Connections   []tracker `json:"connections"`
-	Memory        uint64    `json:"memory"`
+	Download    int64
+	Upload      int64
+	Connections []Tracker
+	Memory      uint64
+}
+
+func (s *Snapshot) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]any{
+		"downloadTotal": s.Download,
+		"uploadTotal":   s.Upload,
+		"connections":   common.Map(s.Connections, func(t Tracker) TrackerMetadata { return t.Metadata() }),
+		"memory":        s.Memory,
+	})
 }
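
The `Leave()` change above keeps a bounded history of recently closed connections. A minimal sketch of that pattern, using the same `list` package, is shown below; the generic type and method names are illustrative, only the mutex-plus-capped-list idea and the 1000-entry cap are taken from the patch.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/sagernet/sing/common/x/list"
)

// boundedHistory keeps at most 1000 entries, evicting the oldest first,
// and is safe for concurrent pushers and readers.
type boundedHistory[T any] struct {
	access  sync.Mutex
	entries list.List[T]
}

func (h *boundedHistory[T]) push(entry T) {
	h.access.Lock()
	defer h.access.Unlock()
	if h.entries.Len() >= 1000 {
		h.entries.PopFront()
	}
	h.entries.PushBack(entry)
}

func (h *boundedHistory[T]) snapshot() []T {
	h.access.Lock()
	defer h.access.Unlock()
	return h.entries.Array()
}

func main() {
	var history boundedHistory[string]
	history.push("closed connection A")
	history.push("closed connection B")
	fmt.Println(history.snapshot())
}
```
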
@@ -2,97 +2,135 @@ package trafficontrol
 
 import (
 	"net"
-	"net/netip"
 	"time"
 
 	"github.com/sagernet/sing-box/adapter"
 	"github.com/sagernet/sing/common"
 	"github.com/sagernet/sing/common/atomic"
 	"github.com/sagernet/sing/common/bufio"
+	F "github.com/sagernet/sing/common/format"
 	"github.com/sagernet/sing/common/json"
 	N "github.com/sagernet/sing/common/network"
 
 	"github.com/gofrs/uuid/v5"
 )
 
-type Metadata struct {
-	NetWork     string     `json:"network"`
-	Type        string     `json:"type"`
-	SrcIP       netip.Addr `json:"sourceIP"`
-	DstIP       netip.Addr `json:"destinationIP"`
-	SrcPort     string     `json:"sourcePort"`
-	DstPort     string     `json:"destinationPort"`
-	Host        string     `json:"host"`
-	DNSMode     string     `json:"dnsMode"`
-	ProcessPath string     `json:"processPath"`
+type TrackerMetadata struct {
+	ID           uuid.UUID
+	Metadata     adapter.InboundContext
+	CreatedAt    time.Time
+	ClosedAt     time.Time
+	Upload       *atomic.Int64
+	Download     *atomic.Int64
+	Chain        []string
+	Rule         adapter.Rule
+	Outbound     string
+	OutboundType string
 }
 
-type tracker interface {
-	ID() string
-	Close() error
-	Leave()
-}
-
-type trackerInfo struct {
-	UUID          uuid.UUID     `json:"id"`
-	Metadata      Metadata      `json:"metadata"`
-	UploadTotal   *atomic.Int64 `json:"upload"`
-	DownloadTotal *atomic.Int64 `json:"download"`
-	Start         time.Time     `json:"start"`
-	Chain         []string      `json:"chains"`
-	Rule          string        `json:"rule"`
-	RulePayload   string        `json:"rulePayload"`
-}
-
-func (t trackerInfo) MarshalJSON() ([]byte, error) {
+func (t TrackerMetadata) MarshalJSON() ([]byte, error) {
+	var inbound string
+	if t.Metadata.Inbound != "" {
+		inbound = t.Metadata.InboundType + "/" + t.Metadata.Inbound
+	} else {
+		inbound = t.Metadata.InboundType
+	}
+	var domain string
+	if t.Metadata.Domain != "" {
+		domain = t.Metadata.Domain
+	} else {
+		domain = t.Metadata.Destination.Fqdn
+	}
+	var processPath string
+	if t.Metadata.ProcessInfo != nil {
+		if t.Metadata.ProcessInfo.ProcessPath != "" {
+			processPath = t.Metadata.ProcessInfo.ProcessPath
+		} else if t.Metadata.ProcessInfo.PackageName != "" {
+			processPath = t.Metadata.ProcessInfo.PackageName
+		}
+		if processPath == "" {
+			if t.Metadata.ProcessInfo.UserId != -1 {
+				processPath = F.ToString(t.Metadata.ProcessInfo.UserId)
+			}
+		} else if t.Metadata.ProcessInfo.User != "" {
+			processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.User, ")")
+		} else if t.Metadata.ProcessInfo.UserId != -1 {
+			processPath = F.ToString(processPath, " (", t.Metadata.ProcessInfo.UserId, ")")
+		}
+	}
+	var rule string
+	if t.Rule != nil {
+		rule = F.ToString(t.Rule, " => ", t.Rule.Outbound())
+	} else {
+		rule = "final"
+	}
 	return json.Marshal(map[string]any{
-		"id":          t.UUID.String(),
-		"metadata":    t.Metadata,
-		"upload":      t.UploadTotal.Load(),
-		"download":    t.DownloadTotal.Load(),
-		"start":       t.Start,
+		"id": t.ID,
+		"metadata": map[string]any{
+			"network":         t.Metadata.Network,
+			"type":            inbound,
+			"sourceIP":        t.Metadata.Source.Addr,
+			"destinationIP":   t.Metadata.Destination.Addr,
+			"sourcePort":      F.ToString(t.Metadata.Source.Port),
+			"destinationPort": F.ToString(t.Metadata.Destination.Port),
+			"host":            domain,
+			"dnsMode":         "normal",
+			"processPath":     processPath,
+		},
+		"upload":      t.Upload.Load(),
+		"download":    t.Download.Load(),
+		"start":       t.CreatedAt,
 		"chains":      t.Chain,
-		"rule":        t.Rule,
-		"rulePayload": t.RulePayload,
+		"rule":        rule,
+		"rulePayload": "",
 	})
 }
 
-type tcpTracker struct {
-	N.ExtendedConn `json:"-"`
-	*trackerInfo
-	manager *Manager
+type Tracker interface {
+	adapter.Tracker
+	Metadata() TrackerMetadata
+	Close() error
 }
 
-func (tt *tcpTracker) ID() string {
-	return tt.UUID.String()
+type TCPConn struct {
+	N.ExtendedConn
+	metadata TrackerMetadata
+	manager  *Manager
 }
 
-func (tt *tcpTracker) Close() error {
+func (tt *TCPConn) Metadata() TrackerMetadata {
+	return tt.metadata
+}
+
+func (tt *TCPConn) Close() error {
 	tt.manager.Leave(tt)
 	return tt.ExtendedConn.Close()
 }
 
-func (tt *tcpTracker) Leave() {
+func (tt *TCPConn) Leave() {
 	tt.manager.Leave(tt)
 }
 
-func (tt *tcpTracker) Upstream() any {
+func (tt *TCPConn) Upstream() any {
 	return tt.ExtendedConn
 }
 
-func (tt *tcpTracker) ReaderReplaceable() bool {
+func (tt *TCPConn) ReaderReplaceable() bool {
 	return true
 }
 
-func (tt *tcpTracker) WriterReplaceable() bool {
+func (tt *TCPConn) WriterReplaceable() bool {
 	return true
 }
 
-func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *tcpTracker {
-	uuid, _ := uuid.NewV4()
-	var chain []string
-	var next string
+func NewTCPTracker(conn net.Conn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *TCPConn {
+	id, _ := uuid.NewV4()
+	var (
+		chain        []string
+		next         string
+		outbound     string
+		outboundType string
+	)
 	if rule == nil {
 		if defaultOutbound, err := router.DefaultOutbound(N.NetworkTCP); err == nil {
 			next = defaultOutbound.Tag()
@@ -106,17 +144,17 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
 		if !loaded {
 			break
 		}
+		outbound = detour.Tag()
+		outboundType = detour.Type()
 		group, isGroup := detour.(adapter.OutboundGroup)
 		if !isGroup {
 			break
 		}
 		next = group.Now()
 	}
 
 	upload := new(atomic.Int64)
 	download := new(atomic.Int64)
-	t := &tcpTracker{
+	tracker := &TCPConn{
 		ExtendedConn: bufio.NewCounterConn(conn, []N.CountFunc{func(n int64) {
 			upload.Add(n)
 			manager.PushUploaded(n)
@@ -124,64 +162,62 @@ func NewTCPTracker(conn net.Conn, manager *Manager, metadata Metadata, router ad
 			download.Add(n)
 			manager.PushDownloaded(n)
 		}}),
-		manager: manager,
-		trackerInfo: &trackerInfo{
-			UUID:          uuid,
-			Start:         time.Now(),
-			Metadata:      metadata,
-			Chain:         common.Reverse(chain),
-			Rule:          "",
-			UploadTotal:   upload,
-			DownloadTotal: download,
+		metadata: TrackerMetadata{
+			ID:           id,
+			Metadata:     metadata,
+			CreatedAt:    time.Now(),
+			Upload:       upload,
+			Download:     download,
+			Chain:        common.Reverse(chain),
+			Rule:         rule,
+			Outbound:     outbound,
+			OutboundType: outboundType,
 		},
+		manager: manager,
 	}
-	if rule != nil {
-		t.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
-	} else {
-		t.trackerInfo.Rule = "final"
-	}
-
-	manager.Join(t)
-	return t
+	manager.Join(tracker)
+	return tracker
 }
 
-type udpTracker struct {
+type UDPConn struct {
 	N.PacketConn `json:"-"`
-	*trackerInfo
-	manager *Manager
+	metadata TrackerMetadata
+	manager  *Manager
 }
 
-func (ut *udpTracker) ID() string {
-	return ut.UUID.String()
+func (ut *UDPConn) Metadata() TrackerMetadata {
+	return ut.metadata
 }
 
-func (ut *udpTracker) Close() error {
+func (ut *UDPConn) Close() error {
 	ut.manager.Leave(ut)
 	return ut.PacketConn.Close()
 }
 
-func (ut *udpTracker) Leave() {
+func (ut *UDPConn) Leave() {
 	ut.manager.Leave(ut)
 }
 
-func (ut *udpTracker) Upstream() any {
+func (ut *UDPConn) Upstream() any {
 	return ut.PacketConn
 }
 
-func (ut *udpTracker) ReaderReplaceable() bool {
+func (ut *UDPConn) ReaderReplaceable() bool {
 	return true
 }
 
-func (ut *udpTracker) WriterReplaceable() bool {
+func (ut *UDPConn) WriterReplaceable() bool {
 	return true
 }
 
-func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, router adapter.Router, rule adapter.Rule) *udpTracker {
-	uuid, _ := uuid.NewV4()
-	var chain []string
-	var next string
+func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata adapter.InboundContext, router adapter.Router, rule adapter.Rule) *UDPConn {
+	id, _ := uuid.NewV4()
+	var (
+		chain        []string
+		next         string
+		outbound     string
+		outboundType string
+	)
 	if rule == nil {
 		if defaultOutbound, err := router.DefaultOutbound(N.NetworkUDP); err == nil {
 			next = defaultOutbound.Tag()
@@ -195,17 +231,17 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
 		if !loaded {
 			break
 		}
+		outbound = detour.Tag()
+		outboundType = detour.Type()
 		group, isGroup := detour.(adapter.OutboundGroup)
 		if !isGroup {
 			break
 		}
 		next = group.Now()
 	}
 
 	upload := new(atomic.Int64)
 	download := new(atomic.Int64)
-	ut := &udpTracker{
+	trackerConn := &UDPConn{
 		PacketConn: bufio.NewCounterPacketConn(conn, []N.CountFunc{func(n int64) {
 			upload.Add(n)
 			manager.PushUploaded(n)
@@ -213,24 +249,19 @@ func NewUDPTracker(conn N.PacketConn, manager *Manager, metadata Metadata, route
 			download.Add(n)
 			manager.PushDownloaded(n)
 		}}),
-		manager: manager,
-		trackerInfo: &trackerInfo{
-			UUID:          uuid,
-			Start:         time.Now(),
-			Metadata:      metadata,
-			Chain:         common.Reverse(chain),
-			Rule:          "",
-			UploadTotal:   upload,
-			DownloadTotal: download,
+		metadata: TrackerMetadata{
+			ID:           id,
+			Metadata:     metadata,
+			CreatedAt:    time.Now(),
+			Upload:       upload,
+			Download:     download,
+			Chain:        common.Reverse(chain),
+			Rule:         rule,
+			Outbound:     outbound,
+			OutboundType: outboundType,
 		},
+		manager: manager,
 	}
-	if rule != nil {
-		ut.trackerInfo.Rule = rule.String() + " => " + rule.Outbound()
-	} else {
-		ut.trackerInfo.Rule = "final"
-	}
-
-	manager.Join(ut)
-	return ut
+	manager.Join(trackerConn)
+	return trackerConn
 }
@@ -14,4 +14,6 @@ const (
 	CommandSetClashMode
 	CommandGetSystemProxyStatus
 	CommandSetSystemProxyEnabled
+	CommandConnections
+	CommandCloseConnection
 )
@@ -9,7 +9,7 @@ import (
 	"github.com/sagernet/sing-box/adapter"
 	"github.com/sagernet/sing-box/experimental/clashapi"
 	E "github.com/sagernet/sing/common/exceptions"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 )
 
 func (c *CommandClient) SetClashMode(newMode string) error {
@@ -22,7 +22,7 @@ func (c *CommandClient) SetClashMode(newMode string) error {
 	if err != nil {
 		return err
 	}
-	err = rw.WriteVString(conn, newMode)
+	err = varbin.Write(conn, binary.BigEndian, newMode)
 	if err != nil {
 		return err
 	}
@@ -30,7 +30,7 @@ func (c *CommandClient) SetClashMode(newMode string) error {
 }
 
 func (s *CommandServer) handleSetClashMode(conn net.Conn) error {
-	newMode, err := rw.ReadVString(conn)
+	newMode, err := varbin.ReadValue[string](conn, binary.BigEndian)
 	if err != nil {
 		return err
 	}
@@ -50,7 +50,7 @@ func (c *CommandClient) handleModeConn(conn net.Conn) {
 	defer conn.Close()
 
 	for {
-		newMode, err := rw.ReadVString(conn)
+		newMode, err := varbin.ReadValue[string](conn, binary.BigEndian)
 		if err != nil {
 			c.handler.Disconnected(err.Error())
 			return
@@ -80,7 +80,7 @@ func (s *CommandServer) handleModeConn(conn net.Conn) error {
 	for {
 		select {
 		case <-s.modeUpdate:
-			err = rw.WriteVString(conn, clashServer.Mode())
+			err = varbin.Write(conn, binary.BigEndian, clashServer.Mode())
 			if err != nil {
 				return err
 			}
@@ -101,12 +101,12 @@ func readClashModeList(reader io.Reader) (modeList []string, currentMode string,
 	}
 	modeList = make([]string, modeListLength)
 	for i := 0; i < int(modeListLength); i++ {
-		modeList[i], err = rw.ReadVString(reader)
+		modeList[i], err = varbin.ReadValue[string](reader, binary.BigEndian)
 		if err != nil {
 			return
 		}
 	}
-	currentMode, err = rw.ReadVString(reader)
+	currentMode, err = varbin.ReadValue[string](reader, binary.BigEndian)
 	return
 }
 
@@ -118,12 +118,12 @@ func writeClashModeList(writer io.Writer, clashServer adapter.ClashServer) error
 	}
 	if len(modeList) > 0 {
 		for _, mode := range modeList {
-			err = rw.WriteVString(writer, mode)
+			err = varbin.Write(writer, binary.BigEndian, mode)
 			if err != nil {
 				return err
 			}
 		}
-		err = rw.WriteVString(writer, clashServer.Mode())
+		err = varbin.Write(writer, binary.BigEndian, clashServer.Mode())
 		if err != nil {
 			return err
 		}
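
The `rw.ReadVString`/`rw.WriteVString` helpers are replaced by `varbin` throughout the command protocol. A minimal round-trip sketch, assuming only the call shapes visible in this diff and using an in-memory pipe in place of the IPC socket, looks like:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"

	"github.com/sagernet/sing/common/varbin"
)

func main() {
	client, server := net.Pipe()
	go func() {
		// Write one length-prefixed string, as the reworked client does.
		defer client.Close()
		_ = varbin.Write(client, binary.BigEndian, "rule")
	}()
	// Read it back, as the reworked server handlers do.
	mode, err := varbin.ReadValue[string](server, binary.BigEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println("received:", mode)
}
```
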
@@ -25,12 +25,13 @@ type CommandClientOptions struct {
 type CommandClientHandler interface {
 	Connected()
 	Disconnected(message string)
-	ClearLog()
-	WriteLog(message string)
+	ClearLogs()
+	WriteLogs(messageList StringIterator)
 	WriteStatus(message *StatusMessage)
 	WriteGroups(message OutboundGroupIterator)
 	InitializeClashMode(modeList StringIterator, currentMode string)
 	UpdateClashMode(newMode string)
+	WriteConnections(message *Connections)
 }
 
 func NewStandaloneCommandClient() *CommandClient {
@@ -83,6 +84,10 @@ func (c *CommandClient) Connect() error {
 	}
 	switch c.options.Command {
 	case CommandLog:
+		err = binary.Write(conn, binary.BigEndian, c.options.StatusInterval)
+		if err != nil {
+			return E.Cause(err, "write interval")
+		}
 		c.handler.Connected()
 		go c.handleLogConn(conn)
 	case CommandStatus:
@@ -116,6 +121,13 @@ func (c *CommandClient) Connect() error {
 			return nil
 		}
 		go c.handleModeConn(conn)
+	case CommandConnections:
+		err = binary.Write(conn, binary.BigEndian, c.options.StatusInterval)
+		if err != nil {
+			return E.Cause(err, "write interval")
+		}
+		c.handler.Connected()
+		go c.handleConnectionsConn(conn)
 	}
 	return nil
 }
experimental/libbox/command_close_connection.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package libbox
+
+import (
+	"bufio"
+	"net"
+
+	"github.com/sagernet/sing-box/experimental/clashapi"
+	"github.com/sagernet/sing/common/binary"
+	E "github.com/sagernet/sing/common/exceptions"
+	"github.com/sagernet/sing/common/varbin"
+
+	"github.com/gofrs/uuid/v5"
+)
+
+func (c *CommandClient) CloseConnection(connId string) error {
+	conn, err := c.directConnect()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+	writer := bufio.NewWriter(conn)
+	err = varbin.Write(writer, binary.BigEndian, connId)
+	if err != nil {
+		return err
+	}
+	err = writer.Flush()
+	if err != nil {
+		return err
+	}
+	return readError(conn)
+}
+
+func (s *CommandServer) handleCloseConnection(conn net.Conn) error {
+	reader := bufio.NewReader(conn)
+	var connId string
+	err := varbin.Read(reader, binary.BigEndian, &connId)
+	if err != nil {
+		return E.Cause(err, "read connection id")
+	}
+	service := s.service
+	if service == nil {
+		return writeError(conn, E.New("service not ready"))
+	}
+	clashServer := service.instance.Router().ClashServer()
+	if clashServer == nil {
+		return writeError(conn, E.New("Clash API disabled"))
+	}
+	targetConn := clashServer.(*clashapi.Server).TrafficManager().Connection(uuid.FromStringOrNil(connId))
+	if targetConn == nil {
+		return writeError(conn, E.New("connection already closed"))
+	}
+	targetConn.Close()
+	return writeError(conn, nil)
+}
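
A hypothetical caller of the new command might look like the sketch below; the wrapper function is illustrative, and the connection ID is assumed to come from a previous `CommandConnections` update.

```go
package main

import (
	"log"

	"github.com/sagernet/sing-box/experimental/libbox"
)

// closeByID asks the running service to drop a single tracked connection.
func closeByID(id string) {
	client := libbox.NewStandaloneCommandClient()
	if err := client.CloseConnection(id); err != nil {
		log.Println("close connection:", err)
	}
}

func main() {
	closeByID("123e4567-e89b-12d3-a456-426614174000") // placeholder UUID
}
```
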
experimental/libbox/command_connections.go (new file, 272 lines)
@@ -0,0 +1,272 @@
+package libbox
+
+import (
+	"bufio"
+	"net"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/sagernet/sing-box/experimental/clashapi"
+	"github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
+	"github.com/sagernet/sing/common/binary"
+	E "github.com/sagernet/sing/common/exceptions"
+	M "github.com/sagernet/sing/common/metadata"
+	"github.com/sagernet/sing/common/varbin"
+
+	"github.com/gofrs/uuid/v5"
+)
+
+func (c *CommandClient) handleConnectionsConn(conn net.Conn) {
+	defer conn.Close()
+	reader := bufio.NewReader(conn)
+	var (
+		rawConnections []Connection
+		connections    Connections
+	)
+	for {
+		err := varbin.Read(reader, binary.BigEndian, &rawConnections)
+		if err != nil {
+			c.handler.Disconnected(err.Error())
+			return
+		}
+		connections.input = rawConnections
+		c.handler.WriteConnections(&connections)
+	}
+}
+
+func (s *CommandServer) handleConnectionsConn(conn net.Conn) error {
+	var interval int64
+	err := binary.Read(conn, binary.BigEndian, &interval)
+	if err != nil {
+		return E.Cause(err, "read interval")
+	}
+	ticker := time.NewTicker(time.Duration(interval))
+	defer ticker.Stop()
+	ctx := connKeepAlive(conn)
+	var trafficManager *trafficontrol.Manager
+	for {
+		service := s.service
+		if service != nil {
+			clashServer := service.instance.Router().ClashServer()
+			if clashServer == nil {
+				return E.New("Clash API disabled")
+			}
+			trafficManager = clashServer.(*clashapi.Server).TrafficManager()
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+		}
+	}
+	var (
+		connections    = make(map[uuid.UUID]*Connection)
+		outConnections []Connection
+	)
+	writer := bufio.NewWriter(conn)
+	for {
+		outConnections = outConnections[:0]
+		for _, connection := range trafficManager.Connections() {
+			outConnections = append(outConnections, newConnection(connections, connection, false))
+		}
+		for _, connection := range trafficManager.ClosedConnections() {
+			outConnections = append(outConnections, newConnection(connections, connection, true))
+		}
+		err = varbin.Write(writer, binary.BigEndian, outConnections)
+		if err != nil {
+			return err
+		}
+		err = writer.Flush()
+		if err != nil {
+			return err
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+		}
+	}
+}
+
+const (
+	ConnectionStateAll = iota
+	ConnectionStateActive
+	ConnectionStateClosed
+)
+
+type Connections struct {
+	input    []Connection
+	filtered []Connection
+}
+
+func (c *Connections) FilterState(state int32) {
+	c.filtered = c.filtered[:0]
+	switch state {
+	case ConnectionStateAll:
+		c.filtered = append(c.filtered, c.input...)
+	case ConnectionStateActive:
+		for _, connection := range c.input {
+			if connection.ClosedAt == 0 {
+				c.filtered = append(c.filtered, connection)
+			}
+		}
+	case ConnectionStateClosed:
+		for _, connection := range c.input {
+			if connection.ClosedAt != 0 {
+				c.filtered = append(c.filtered, connection)
+			}
+		}
+	}
+}
+
+func (c *Connections) SortByDate() {
+	slices.SortStableFunc(c.filtered, func(x, y Connection) int {
+		if x.CreatedAt < y.CreatedAt {
+			return 1
+		} else if x.CreatedAt > y.CreatedAt {
+			return -1
+		} else {
+			return strings.Compare(y.ID, x.ID)
+		}
+	})
+}
+
+func (c *Connections) SortByTraffic() {
+	slices.SortStableFunc(c.filtered, func(x, y Connection) int {
+		xTraffic := x.Uplink + x.Downlink
+		yTraffic := y.Uplink + y.Downlink
+		if xTraffic < yTraffic {
+			return 1
+		} else if xTraffic > yTraffic {
+			return -1
+		} else {
+			return strings.Compare(y.ID, x.ID)
+		}
+	})
+}
+
+func (c *Connections) SortByTrafficTotal() {
+	slices.SortStableFunc(c.filtered, func(x, y Connection) int {
+		xTraffic := x.UplinkTotal + x.DownlinkTotal
+		yTraffic := y.UplinkTotal + y.DownlinkTotal
+		if xTraffic < yTraffic {
+			return 1
+		} else if xTraffic > yTraffic {
+			return -1
+		} else {
+			return strings.Compare(y.ID, x.ID)
+		}
+	})
+}
+
+func (c *Connections) Iterator() ConnectionIterator {
+	return newPtrIterator(c.filtered)
+}
+
+type Connection struct {
+	ID            string
+	Inbound       string
+	InboundType   string
+	IPVersion     int32
+	Network       string
+	Source        string
+	Destination   string
+	Domain        string
+	Protocol      string
+	User          string
+	FromOutbound  string
+	CreatedAt     int64
+	ClosedAt      int64
+	Uplink        int64
+	Downlink      int64
+	UplinkTotal   int64
+	DownlinkTotal int64
+	Rule          string
+	Outbound      string
+	OutboundType  string
+	ChainList     []string
+}
+
+func (c *Connection) Chain() StringIterator {
+	return newIterator(c.ChainList)
+}
+
+func (c *Connection) DisplayDestination() string {
+	destination := M.ParseSocksaddr(c.Destination)
+	if destination.IsIP() && c.Domain != "" {
+		destination = M.Socksaddr{
+			Fqdn: c.Domain,
+			Port: destination.Port,
+		}
+		return destination.String()
+	}
+	return c.Destination
+}
+
+type ConnectionIterator interface {
+	Next() *Connection
+	HasNext() bool
+}
+
+func newConnection(connections map[uuid.UUID]*Connection, metadata trafficontrol.TrackerMetadata, isClosed bool) Connection {
+	if oldConnection, loaded := connections[metadata.ID]; loaded {
+		if isClosed {
+			if oldConnection.ClosedAt == 0 {
+				oldConnection.Uplink = 0
+				oldConnection.Downlink = 0
+				oldConnection.ClosedAt = metadata.ClosedAt.UnixMilli()
+			}
+			return *oldConnection
+		}
+		lastUplink := oldConnection.UplinkTotal
+		lastDownlink := oldConnection.DownlinkTotal
+		uplinkTotal := metadata.Upload.Load()
+		downlinkTotal := metadata.Download.Load()
+		oldConnection.Uplink = uplinkTotal - lastUplink
+		oldConnection.Downlink = downlinkTotal - lastDownlink
+		oldConnection.UplinkTotal = uplinkTotal
+		oldConnection.DownlinkTotal = downlinkTotal
+		return *oldConnection
+	}
+	var rule string
+	if metadata.Rule != nil {
+		rule = metadata.Rule.String()
+	}
+	uplinkTotal := metadata.Upload.Load()
+	downlinkTotal := metadata.Download.Load()
+	uplink := uplinkTotal
+	downlink := downlinkTotal
+	var closedAt int64
+	if !metadata.ClosedAt.IsZero() {
+		closedAt = metadata.ClosedAt.UnixMilli()
+		uplink = 0
+		downlink = 0
+	}
+	connection := Connection{
+		ID:            metadata.ID.String(),
+		Inbound:       metadata.Metadata.Inbound,
+		InboundType:   metadata.Metadata.InboundType,
+		IPVersion:     int32(metadata.Metadata.IPVersion),
+		Network:       metadata.Metadata.Network,
+		Source:        metadata.Metadata.Source.String(),
+		Destination:   metadata.Metadata.Destination.String(),
+		Domain:        metadata.Metadata.Domain,
+		Protocol:      metadata.Metadata.Protocol,
+		User:          metadata.Metadata.User,
+		FromOutbound:  metadata.Metadata.Outbound,
+		CreatedAt:     metadata.CreatedAt.UnixMilli(),
+		ClosedAt:      closedAt,
+		Uplink:        uplink,
+		Downlink:      downlink,
+		UplinkTotal:   uplinkTotal,
+		DownlinkTotal: downlinkTotal,
+		Rule:          rule,
+		Outbound:      metadata.Outbound,
+		OutboundType:  metadata.OutboundType,
+		ChainList:     metadata.Chain,
+	}
+	connections[metadata.ID] = &connection
+	return connection
+}
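
On the client side, a `CommandClientHandler.WriteConnections` implementation might use the new helpers roughly as follows; this is a fragment meant to live inside a handler, the package name and function name are illustrative, and the handler wiring is omitted.

```go
package clientui // illustrative package; real code lives in the app's handler

import (
	"log"

	"github.com/sagernet/sing-box/experimental/libbox"
)

// printActiveByTraffic shows one way to consume a connections update:
// keep only active entries, order by current transfer rate, then iterate.
func printActiveByTraffic(message *libbox.Connections) {
	message.FilterState(libbox.ConnectionStateActive) // drop closed entries
	message.SortByTraffic()                           // busiest connections first
	for it := message.Iterator(); it.HasNext(); {
		c := it.Next()
		log.Println(c.Network, c.DisplayDestination(), c.Outbound, c.Uplink, c.Downlink)
	}
}
```
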
@@ -1,6 +1,7 @@
 package libbox
 
 import (
+	"bufio"
 	"encoding/binary"
 	"io"
 	"net"
@@ -10,40 +11,10 @@ import (
 	"github.com/sagernet/sing-box/common/urltest"
 	"github.com/sagernet/sing-box/outbound"
 	E "github.com/sagernet/sing/common/exceptions"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 	"github.com/sagernet/sing/service"
 )
 
-type OutboundGroup struct {
-	Tag        string
-	Type       string
-	Selectable bool
-	Selected   string
-	IsExpand   bool
-	items      []*OutboundGroupItem
-}
-
-func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
-	return newIterator(g.items)
-}
-
-type OutboundGroupIterator interface {
-	Next() *OutboundGroup
-	HasNext() bool
-}
-
-type OutboundGroupItem struct {
-	Tag          string
-	Type         string
-	URLTestTime  int64
-	URLTestDelay int32
-}
-
-type OutboundGroupItemIterator interface {
-	Next() *OutboundGroupItem
-	HasNext() bool
-}
-
 func (c *CommandClient) handleGroupConn(conn net.Conn) {
 	defer conn.Close()
 
@@ -66,19 +37,24 @@ func (s *CommandServer) handleGroupConn(conn net.Conn) error {
 	ticker := time.NewTicker(time.Duration(interval))
 	defer ticker.Stop()
 	ctx := connKeepAlive(conn)
+	writer := bufio.NewWriter(conn)
 	for {
 		service := s.service
 		if service != nil {
-			err := writeGroups(conn, service)
+			err = writeGroups(writer, service)
 			if err != nil {
 				return err
 			}
 		} else {
-			err := binary.Write(conn, binary.BigEndian, uint16(0))
+			err = binary.Write(writer, binary.BigEndian, uint16(0))
 			if err != nil {
 				return err
 			}
 		}
+		err = writer.Flush()
+		if err != nil {
+			return err
+		}
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
@@ -92,74 +68,41 @@ func (s *CommandServer) handleGroupConn(conn net.Conn) error {
 	}
 }
 
+type OutboundGroup struct {
+	Tag        string
+	Type       string
+	Selectable bool
+	Selected   string
+	IsExpand   bool
+	ItemList   []*OutboundGroupItem
+}
+
+func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
+	return newIterator(g.ItemList)
+}
+
+type OutboundGroupIterator interface {
+	Next() *OutboundGroup
+	HasNext() bool
+}
+
+type OutboundGroupItem struct {
+	Tag          string
+	Type         string
+	URLTestTime  int64
+	URLTestDelay int32
+}
+
+type OutboundGroupItemIterator interface {
+	Next() *OutboundGroupItem
+	HasNext() bool
+}
+
 func readGroups(reader io.Reader) (OutboundGroupIterator, error) {
-	var groupLength uint16
-	err := binary.Read(reader, binary.BigEndian, &groupLength)
+	groups, err := varbin.ReadValue[[]*OutboundGroup](reader, binary.BigEndian)
 	if err != nil {
 		return nil, err
 	}
-
-	groups := make([]*OutboundGroup, 0, groupLength)
-	for i := 0; i < int(groupLength); i++ {
-		var group OutboundGroup
-		group.Tag, err = rw.ReadVString(reader)
-		if err != nil {
-			return nil, err
-		}
-
-		group.Type, err = rw.ReadVString(reader)
-		if err != nil {
-			return nil, err
-		}
-
-		err = binary.Read(reader, binary.BigEndian, &group.Selectable)
-		if err != nil {
-			return nil, err
-		}
-
-		group.Selected, err = rw.ReadVString(reader)
-		if err != nil {
-			return nil, err
-		}
-
-		err = binary.Read(reader, binary.BigEndian, &group.IsExpand)
-		if err != nil {
-			return nil, err
-		}
-
-		var itemLength uint16
-		err = binary.Read(reader, binary.BigEndian, &itemLength)
-		if err != nil {
-			return nil, err
-		}
-
-		group.items = make([]*OutboundGroupItem, itemLength)
-		for j := 0; j < int(itemLength); j++ {
-			var item OutboundGroupItem
-			item.Tag, err = rw.ReadVString(reader)
-			if err != nil {
-				return nil, err
-			}
-
-			item.Type, err = rw.ReadVString(reader)
-			if err != nil {
-				return nil, err
-			}
-
-			err = binary.Read(reader, binary.BigEndian, &item.URLTestTime)
-			if err != nil {
-				return nil, err
-			}
-
-			err = binary.Read(reader, binary.BigEndian, &item.URLTestDelay)
-			if err != nil {
-				return nil, err
-			}
-
-			group.items[j] = &item
-		}
-		groups = append(groups, &group)
-	}
 	return newIterator(groups), nil
 }
 
@@ -199,63 +142,14 @@ func writeGroups(writer io.Writer, boxService *BoxService) error {
 			item.URLTestTime = history.Time.Unix()
 			item.URLTestDelay = int32(history.Delay)
 		}
-		group.items = append(group.items, &item)
+		group.ItemList = append(group.ItemList, &item)
 	}
-	if len(group.items) < 2 {
+	if len(group.ItemList) < 2 {
 		continue
 	}
 	groups = append(groups, group)
 }
-
-	err := binary.Write(writer, binary.BigEndian, uint16(len(groups)))
-	if err != nil {
-		return err
-	}
-	for _, group := range groups {
-		err = rw.WriteVString(writer, group.Tag)
-		if err != nil {
-			return err
-		}
-		err = rw.WriteVString(writer, group.Type)
-		if err != nil {
-			return err
-		}
-		err = binary.Write(writer, binary.BigEndian, group.Selectable)
-		if err != nil {
-			return err
-		}
-		err = rw.WriteVString(writer, group.Selected)
-		if err != nil {
-			return err
-		}
-		err = binary.Write(writer, binary.BigEndian, group.IsExpand)
-		if err != nil {
-			return err
-		}
-		err = binary.Write(writer, binary.BigEndian, uint16(len(group.items)))
-		if err != nil {
-			return err
-		}
-		for _, item := range group.items {
-			err = rw.WriteVString(writer, item.Tag)
-			if err != nil {
-				return err
-			}
-			err = rw.WriteVString(writer, item.Type)
-			if err != nil {
-				return err
-			}
-			err = binary.Write(writer, binary.BigEndian, item.URLTestTime)
-			if err != nil {
-				return err
-			}
-			err = binary.Write(writer, binary.BigEndian, item.URLTestDelay)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
+	return varbin.Write(writer, binary.BigEndian, groups)
 }
 
 func (c *CommandClient) SetGroupExpand(groupTag string, isExpand bool) error {
@@ -268,7 +162,7 @@ func (c *CommandClient) SetGroupExpand(groupTag string, isExpand bool) error {
 	if err != nil {
 		return err
 	}
-	err = rw.WriteVString(conn, groupTag)
+	err = varbin.Write(conn, binary.BigEndian, groupTag)
 	if err != nil {
 		return err
 	}
@@ -280,7 +174,7 @@ func (c *CommandClient) SetGroupExpand(groupTag string, isExpand bool) error {
 }
 
 func (s *CommandServer) handleSetGroupExpand(conn net.Conn) error {
-	groupTag, err := rw.ReadVString(conn)
+	groupTag, err := varbin.ReadValue[string](conn, binary.BigEndian)
 	if err != nil {
 		return err
 	}
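
`readGroups`/`writeGroups` now delegate the whole `[]*OutboundGroup` encoding to `varbin` instead of writing each field by hand. A self-contained sketch of that style of round trip, with an illustrative `Item` type standing in for `OutboundGroupItem` and an in-memory buffer standing in for the connection, could look like:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/sagernet/sing/common/varbin"
)

// Item only mirrors the shape of OutboundGroupItem for illustration.
type Item struct {
	Tag          string
	Type         string
	URLTestTime  int64
	URLTestDelay int32
}

func main() {
	var buffer bytes.Buffer
	items := []*Item{{Tag: "proxy-a", Type: "vmess", URLTestDelay: 42}}
	// One call serializes the whole slice, replacing the per-field loops.
	if err := varbin.Write(&buffer, binary.BigEndian, items); err != nil {
		panic(err)
	}
	decoded, err := varbin.ReadValue[[]*Item](bufio.NewReader(&buffer), binary.BigEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].Tag, decoded[0].URLTestDelay)
}
```
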
@@ -1,12 +1,27 @@
 package libbox
 
 import (
+	"bufio"
 	"context"
-	"encoding/binary"
 	"io"
 	"net"
+	"time"
+
+	"github.com/sagernet/sing/common/binary"
+	E "github.com/sagernet/sing/common/exceptions"
+	"github.com/sagernet/sing/common/varbin"
 )
 
+func (s *CommandServer) ResetLog() {
+	s.access.Lock()
+	defer s.access.Unlock()
+	s.savedLines.Init()
+	select {
+	case s.logReset <- struct{}{}:
+	default:
+	}
+}
+
 func (s *CommandServer) WriteMessage(message string) {
 	s.subscriber.Emit(message)
 	s.access.Lock()
@@ -17,43 +32,19 @@ func (s *CommandServer) WriteMessage(message string) {
 	s.access.Unlock()
 }
 
-func readLog(reader io.Reader) ([]byte, error) {
-	var messageLength uint16
-	err := binary.Read(reader, binary.BigEndian, &messageLength)
-	if err != nil {
-		return nil, err
-	}
-	if messageLength == 0 {
-		return nil, nil
-	}
-	data := make([]byte, messageLength)
-	_, err = io.ReadFull(reader, data)
-	if err != nil {
-		return nil, err
-	}
-	return data, nil
-}
-
-func writeLog(writer io.Writer, message []byte) error {
-	err := binary.Write(writer, binary.BigEndian, uint8(0))
-	if err != nil {
-		return err
-	}
-	err = binary.Write(writer, binary.BigEndian, uint16(len(message)))
-	if err != nil {
-		return err
-	}
-	if len(message) > 0 {
-		_, err = writer.Write(message)
-	}
-	return err
-}
-
-func writeClearLog(writer io.Writer) error {
-	return binary.Write(writer, binary.BigEndian, uint8(1))
-}
-
 func (s *CommandServer) handleLogConn(conn net.Conn) error {
+	var (
+		interval int64
+		timer    *time.Timer
+	)
+	err := binary.Read(conn, binary.BigEndian, &interval)
+	if err != nil {
+		return E.Cause(err, "read interval")
+	}
+	timer = time.NewTimer(time.Duration(interval))
+	if !timer.Stop() {
+		<-timer.C
+	}
 	var savedLines []string
 	s.access.Lock()
 	savedLines = make([]string, 0, s.savedLines.Len())
@@ -66,52 +57,90 @@ func (s *CommandServer) handleLogConn(conn net.Conn) error {
 		return err
 	}
 	defer s.observer.UnSubscribe(subscription)
-	for _, line := range savedLines {
-		err = writeLog(conn, []byte(line))
+	writer := bufio.NewWriter(conn)
+	select {
+	case <-s.logReset:
+		err = writer.WriteByte(1)
+		if err != nil {
+			return err
+		}
+		err = writer.Flush()
+		if err != nil {
+			return err
+		}
+	default:
+	}
+	if len(savedLines) > 0 {
+		err = writer.WriteByte(0)
+		if err != nil {
+			return err
+		}
+		err = varbin.Write(writer, binary.BigEndian, savedLines)
 		if err != nil {
 			return err
 		}
 	}
 	ctx := connKeepAlive(conn)
+	var logLines []string
 	for {
+		err = writer.Flush()
+		if err != nil {
+			return err
+		}
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
-		case message := <-subscription:
-			err = writeLog(conn, []byte(message))
-			if err != nil {
-				return err
-			}
 		case <-s.logReset:
-			err = writeClearLog(conn)
+			err = writer.WriteByte(1)
 			if err != nil {
 				return err
 			}
 		case <-done:
 			return nil
+		case logLine := <-subscription:
+			logLines = logLines[:0]
+			logLines = append(logLines, logLine)
+			timer.Reset(time.Duration(interval))
+		loopLogs:
+			for {
+				select {
+				case logLine = <-subscription:
+					logLines = append(logLines, logLine)
+				case <-timer.C:
+					break loopLogs
+				}
+			}
+			err = writer.WriteByte(0)
+			if err != nil {
+				return err
+			}
+			err = varbin.Write(writer, binary.BigEndian, logLines)
+			if err != nil {
+				return err
+			}
 		}
 	}
 }
 
 func (c *CommandClient) handleLogConn(conn net.Conn) {
+	reader := bufio.NewReader(conn)
 	for {
-		var messageType uint8
-		err := binary.Read(conn, binary.BigEndian, &messageType)
+		messageType, err := reader.ReadByte()
 		if err != nil {
 			c.handler.Disconnected(err.Error())
 			return
 		}
-		var message []byte
+		var messages []string
 		switch messageType {
 		case 0:
-			message, err = readLog(conn)
+			err = varbin.Read(reader, binary.BigEndian, &messages)
 			if err != nil {
 				c.handler.Disconnected(err.Error())
 				return
 			}
-			c.handler.WriteLog(string(message))
+			c.handler.WriteLogs(newIterator(messages))
 		case 1:
-			c.handler.ClearLog()
+			c.handler.ClearLogs()
 		}
 	}
 }
@@ -120,7 +149,7 @@ func connKeepAlive(reader io.Reader) context.Context {
 	ctx, cancel := context.WithCancelCause(context.Background())
 	go func() {
 		for {
-			_, err := readLog(reader)
+			_, err := reader.Read(make([]byte, 1))
 			if err != nil {
 				cancel(err)
 				return
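
The log stream now batches lines per interval instead of writing one message per line. The core collect-until-the-timer-fires loop, stripped of the framing byte and the `varbin` encoding, can be sketched as follows; the function and variable names are illustrative.

```go
package main

import (
	"fmt"
	"time"
)

// drainBatch collects the first line plus everything that arrives on the
// channel before the interval timer fires, then returns the batch to be
// flushed as a single update.
func drainBatch(lines <-chan string, first string, interval time.Duration) []string {
	batch := []string{first}
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case line := <-lines:
			batch = append(batch, line)
		case <-timer.C:
			return batch
		}
	}
}

func main() {
	lines := make(chan string, 8)
	lines <- "second line"
	lines <- "third line"
	fmt.Println(drainBatch(lines, "first line", 100*time.Millisecond))
}
```
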
@@ -5,7 +5,7 @@ import (
 	"net"
 
 	E "github.com/sagernet/sing/common/exceptions"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 )
 
 func (c *CommandClient) ServiceReload() error {
@@ -24,7 +24,7 @@ func (c *CommandClient) ServiceReload() error {
 		return err
 	}
 	if hasError {
-		errorMessage, err := rw.ReadVString(conn)
+		errorMessage, err := varbin.ReadValue[string](conn, binary.BigEndian)
 		if err != nil {
 			return err
 		}
@@ -40,7 +40,7 @@ func (s *CommandServer) handleServiceReload(conn net.Conn) error {
 		return err
 	}
 	if rErr != nil {
-		return rw.WriteVString(conn, rErr.Error())
+		return varbin.Write(conn, binary.BigEndian, rErr.Error())
 	}
 	return nil
 }
@@ -61,7 +61,7 @@ func (c *CommandClient) ServiceClose() error {
 		return nil
 	}
 	if hasError {
-		errorMessage, err := rw.ReadVString(conn)
+		errorMessage, err := varbin.ReadValue[string](conn, binary.BigEndian)
 		if err != nil {
 			return nil
 		}
@@ -78,7 +78,7 @@ func (s *CommandServer) handleServiceClose(conn net.Conn) error {
 		return err
 	}
 	if rErr != nil {
-		return rw.WriteVString(conn, rErr.Error())
+		return varbin.Write(conn, binary.BigEndian, rErr.Error())
 	}
 	return nil
 }
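
Across these hunks the rw.ReadVString/rw.WriteVString helpers are replaced by varbin, which writes values with a length prefix in the given byte order. A minimal round-trip sketch using only the calls that appear in the diff (net.Pipe and the message text merely stand in for the command socket and payload):

// Sketch of the varbin string round trip that replaces rw.ReadVString /
// rw.WriteVString in this diff. net.Pipe stands in for the command socket.
package main

import (
	"encoding/binary"
	"fmt"
	"net"

	"github.com/sagernet/sing/common/varbin"
)

func main() {
	client, server := net.Pipe()
	go func() {
		// Writer side: length-prefixed string, big-endian, as in handleServiceReload.
		_ = varbin.Write(client, binary.BigEndian, "service reloaded")
		_ = client.Close()
	}()
	// Reader side: generic ReadValue, as in ServiceReload / readError.
	message, err := varbin.ReadValue[string](server, binary.BigEndian)
	if err != nil {
		panic(err)
	}
	fmt.Println(message)
}
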
@@ -6,7 +6,7 @@ import (
 
 	"github.com/sagernet/sing-box/outbound"
 	E "github.com/sagernet/sing/common/exceptions"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 )
 
 func (c *CommandClient) SelectOutbound(groupTag string, outboundTag string) error {
@@ -19,11 +19,11 @@ func (c *CommandClient) SelectOutbound(groupTag string, outboundTag string) erro
 	if err != nil {
 		return err
 	}
-	err = rw.WriteVString(conn, groupTag)
+	err = varbin.Write(conn, binary.BigEndian, groupTag)
 	if err != nil {
 		return err
 	}
-	err = rw.WriteVString(conn, outboundTag)
+	err = varbin.Write(conn, binary.BigEndian, outboundTag)
 	if err != nil {
 		return err
 	}
@@ -31,11 +31,11 @@ func (c *CommandClient) SelectOutbound(groupTag string, outboundTag string) erro
 }
 
 func (s *CommandServer) handleSelectOutbound(conn net.Conn) error {
-	groupTag, err := rw.ReadVString(conn)
+	groupTag, err := varbin.ReadValue[string](conn, binary.BigEndian)
 	if err != nil {
 		return err
 	}
-	outboundTag, err := rw.ReadVString(conn)
+	outboundTag, err := varbin.ReadValue[string](conn, binary.BigEndian)
 	if err != nil {
 		return err
 	}
@@ -33,6 +33,8 @@ type CommandServer struct {
 	urlTestUpdate chan struct{}
 	modeUpdate    chan struct{}
 	logReset      chan struct{}
+
+	closedConnections []Connection
 }
 
 type CommandServerHandler interface {
@@ -64,14 +66,6 @@ func (s *CommandServer) SetService(newService *BoxService) {
 	s.notifyURLTestUpdate()
 }
 
-func (s *CommandServer) ResetLog() {
-	s.savedLines.Init()
-	select {
-	case s.logReset <- struct{}{}:
-	default:
-	}
-}
-
 func (s *CommandServer) notifyURLTestUpdate() {
 	select {
 	case s.urlTestUpdate <- struct{}{}:
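
The removed ResetLog used the same non-blocking notification idiom that notifyURLTestUpdate keeps: a select with a default branch, so a signal is dropped instead of blocking the sender when nobody is currently listening. A tiny sketch of the idiom with illustrative names (the buffered channel is a demo choice, not taken from the diff):

// Sketch of the non-blocking notification pattern used by the command server
// (notifyURLTestUpdate and, before this change, ResetLog). Names are illustrative.
package main

import "fmt"

type notifier struct {
	update chan struct{}
}

// notify never blocks: if no reader is waiting and the buffer is full,
// the signal is simply dropped.
func (n *notifier) notify() {
	select {
	case n.update <- struct{}{}:
	default:
	}
}

func main() {
	n := &notifier{update: make(chan struct{}, 1)}
	n.notify() // stored in the buffer
	n.notify() // buffer full: dropped, does not block
	<-n.update
	fmt.Println("received one coalesced notification")
}
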
@@ -176,6 +170,10 @@ func (s *CommandServer) handleConnection(conn net.Conn) error {
 		return s.handleGetSystemProxyStatus(conn)
 	case CommandSetSystemProxyEnabled:
 		return s.handleSetSystemProxyEnabled(conn)
+	case CommandConnections:
+		return s.handleConnectionsConn(conn)
+	case CommandCloseConnection:
+		return s.handleCloseConnection(conn)
 	default:
 		return E.New("unknown command: ", command)
 	}
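
handleConnection gains two new cases, CommandConnections and CommandCloseConnection, in its read-one-command-then-dispatch switch. A generic sketch of that dispatch shape, with placeholder constants and handling rather than the real sing-box ones:

// Generic sketch of a command-dispatch loop like the one above.
// The constants and the handling are placeholders, not sing-box code.
package main

import (
	"errors"
	"fmt"
	"net"
)

const (
	cmdStatus byte = iota
	cmdConnections
	cmdCloseConnection
)

func handleConnection(conn net.Conn) error {
	defer conn.Close()
	command := make([]byte, 1)
	if _, err := conn.Read(command); err != nil {
		return err
	}
	switch command[0] {
	case cmdConnections:
		fmt.Println("connection list requested")
	case cmdCloseConnection:
		fmt.Println("close-connection requested")
	default:
		return errors.New("unknown command")
	}
	return nil
}

func main() {
	client, server := net.Pipe()
	go func() {
		_, _ = client.Write([]byte{cmdConnections})
		_ = client.Close()
	}()
	if err := handleConnection(server); err != nil {
		fmt.Println("error:", err)
	}
}
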
@@ -5,7 +5,7 @@ import (
 	"io"
 
 	E "github.com/sagernet/sing/common/exceptions"
-	"github.com/sagernet/sing/common/rw"
+	"github.com/sagernet/sing/common/varbin"
 )
 
 func readError(reader io.Reader) error {
@@ -15,7 +15,7 @@ func readError(reader io.Reader) error {
 		return err
 	}
 	if hasError {
-		errorMessage, err := rw.ReadVString(reader)
+		errorMessage, err := varbin.ReadValue[string](reader, binary.BigEndian)
 		if err != nil {
 			return err
 		}
@@ -30,7 +30,7 @@ func writeError(writer io.Writer, wErr error) error {
 		return err
 	}
 	if wErr != nil {
-		err = rw.WriteVString(writer, wErr.Error())
+		err = varbin.Write(writer, binary.BigEndian, wErr.Error())
 		if err != nil {
 			return err
 		}
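
readError and writeError frame an optional error as a boolean flag followed, only when the flag is set, by the message as a varbin string. The flag encoding itself is outside these hunks, so the binary.Write/binary.Read of a bool below is an assumption made for illustration; the varbin calls are the ones the diff shows:

// Sketch of the optional-error framing used by readError/writeError:
// a boolean flag, then (only if set) the message as a varbin string.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"

	"github.com/sagernet/sing/common/varbin"
)

func writeError(writer io.Writer, wErr error) error {
	err := binary.Write(writer, binary.BigEndian, wErr != nil) // assumed flag encoding
	if err != nil {
		return err
	}
	if wErr != nil {
		return varbin.Write(writer, binary.BigEndian, wErr.Error()) // as in the diff
	}
	return nil
}

func readError(reader io.Reader) error {
	var hasError bool
	err := binary.Read(reader, binary.BigEndian, &hasError) // assumed flag encoding
	if err != nil {
		return err
	}
	if !hasError {
		return nil
	}
	errorMessage, err := varbin.ReadValue[string](reader, binary.BigEndian) // as in the diff
	if err != nil {
		return err
	}
	return errors.New(errorMessage)
}

func main() {
	pr, pw := io.Pipe()
	go func() {
		_ = writeError(pw, errors.New("boom"))
		_ = pw.Close()
	}()
	fmt.Println(readError(pr))
}
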
@@ -36,7 +36,7 @@ func (s *CommandServer) readStatus() StatusMessage {
 		trafficManager := clashServer.(*clashapi.Server).TrafficManager()
 		message.Uplink, message.Downlink = trafficManager.Now()
 		message.UplinkTotal, message.DownlinkTotal = trafficManager.Total()
-		message.ConnectionsIn = int32(trafficManager.Connections())
+		message.ConnectionsIn = int32(trafficManager.ConnectionsLen())
 	}
 }
 