Mirror of https://github.com/SagerNet/sing-box.git, synced 2025-06-13 21:54:13 +08:00

Compare commits: 14 commits, dev-next ... v1.8.0-alp
Commits (SHA1):
b378e03960
c6078f5bf0
39ef7a31ee
3d69034742
030e90f8a7
2fb4d26f9f
7a0456b895
ab684eedfc
75f07fe479
be7ff4f801
f27ef8151a
7674ec88bc
ce99c4a709
29e688443f
.gitignore (vendored), 1 changed line:

```diff
@@ -1,6 +1,7 @@
 /.idea/
 /vendor/
 /*.json
+/*.srs
 /*.db
 /site/
 /bin/
```
```diff
@@ -1,11 +1,16 @@
 package adapter
 
 import (
+	"bytes"
 	"context"
+	"encoding/binary"
+	"io"
 	"net"
+	"time"
 
 	"github.com/sagernet/sing-box/common/urltest"
 	N "github.com/sagernet/sing/common/network"
+	"github.com/sagernet/sing/common/rw"
 )
 
 type ClashServer interface {
@@ -13,22 +18,83 @@ type ClashServer interface {
 	PreStarter
 	Mode() string
 	ModeList() []string
-	StoreSelected() bool
-	StoreFakeIP() bool
-	CacheFile() ClashCacheFile
 	HistoryStorage() *urltest.HistoryStorage
 	RoutedConnection(ctx context.Context, conn net.Conn, metadata InboundContext, matchedRule Rule) (net.Conn, Tracker)
 	RoutedPacketConnection(ctx context.Context, conn N.PacketConn, metadata InboundContext, matchedRule Rule) (N.PacketConn, Tracker)
 }
 
-type ClashCacheFile interface {
+type CacheFile interface {
+	Service
+	PreStarter
+
+	StoreFakeIP() bool
+	FakeIPStorage
+
 	LoadMode() string
 	StoreMode(mode string) error
 	LoadSelected(group string) string
 	StoreSelected(group string, selected string) error
 	LoadGroupExpand(group string) (isExpand bool, loaded bool)
 	StoreGroupExpand(group string, expand bool) error
-	FakeIPStorage
+	LoadRuleSet(tag string) *SavedRuleSet
+	SaveRuleSet(tag string, set *SavedRuleSet) error
+}
+
+type SavedRuleSet struct {
+	Content     []byte
+	LastUpdated time.Time
+	LastEtag    string
+}
+
+func (s *SavedRuleSet) MarshalBinary() ([]byte, error) {
+	var buffer bytes.Buffer
+	err := binary.Write(&buffer, binary.BigEndian, uint8(1))
+	if err != nil {
+		return nil, err
+	}
+	err = rw.WriteUVariant(&buffer, uint64(len(s.Content)))
+	if err != nil {
+		return nil, err
+	}
+	buffer.Write(s.Content)
+	err = binary.Write(&buffer, binary.BigEndian, s.LastUpdated.Unix())
+	if err != nil {
+		return nil, err
+	}
+	err = rw.WriteVString(&buffer, s.LastEtag)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+func (s *SavedRuleSet) UnmarshalBinary(data []byte) error {
+	reader := bytes.NewReader(data)
+	var version uint8
+	err := binary.Read(reader, binary.BigEndian, &version)
+	if err != nil {
+		return err
+	}
+	contentLen, err := rw.ReadUVariant(reader)
+	if err != nil {
+		return err
+	}
+	s.Content = make([]byte, contentLen)
+	_, err = io.ReadFull(reader, s.Content)
+	if err != nil {
+		return err
+	}
+	var lastUpdated int64
+	err = binary.Read(reader, binary.BigEndian, &lastUpdated)
+	if err != nil {
+		return err
+	}
+	s.LastUpdated = time.Unix(lastUpdated, 0)
+	s.LastEtag, err = rw.ReadVString(reader)
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 type Tracker interface {
```
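The SavedRuleSet binary format introduced above is versioned and self-describing: a version byte, a uvarint-length-prefixed content blob, a big-endian Unix timestamp, and a length-prefixed etag string. A minimal round-trip sketch, assuming the adapter package from this diff; the payload bytes and etag value are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/sagernet/sing-box/adapter"
)

func main() {
	saved := adapter.SavedRuleSet{
		Content:     []byte("placeholder rule-set body"), // illustrative payload only
		LastUpdated: time.Now(),
		LastEtag:    `W/"placeholder"`,
	}
	// Encode: version byte, uvarint content length, content, unix time, etag.
	data, err := saved.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	// Decode back into a fresh value; fields round-trip (time at second precision).
	var restored adapter.SavedRuleSet
	if err := restored.UnmarshalBinary(data); err != nil {
		log.Fatal(err)
	}
	log.Println(len(restored.Content), restored.LastEtag)
}
```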
```diff
@@ -46,11 +46,24 @@ type InboundContext struct {
 	SourceGeoIPCode string
 	GeoIPCode       string
 	ProcessInfo     *process.Info
+	QueryType       uint16
 	FakeIP          bool
 
-	// dns cache
+	// rule cache
 
-	QueryType uint16
+	IPCIDRMatchSource       bool
+	SourceAddressMatch      bool
+	SourcePortMatch         bool
+	DestinationAddressMatch bool
+	DestinationPortMatch    bool
+}
+
+func (c *InboundContext) ResetRuleCache() {
+	c.IPCIDRMatchSource = false
+	c.SourceAddressMatch = false
+	c.SourcePortMatch = false
+	c.DestinationAddressMatch = false
+	c.DestinationPortMatch = false
 }
 
 type inboundContextKey struct{}
```
```diff
@@ -2,12 +2,14 @@ package adapter
 
 import (
 	"context"
+	"net/http"
 	"net/netip"
 
 	"github.com/sagernet/sing-box/common/geoip"
 	"github.com/sagernet/sing-dns"
 	"github.com/sagernet/sing-tun"
 	"github.com/sagernet/sing/common/control"
+	N "github.com/sagernet/sing/common/network"
 	"github.com/sagernet/sing/service"
 
 	mdns "github.com/miekg/dns"
@@ -19,7 +21,7 @@ type Router interface {
 
 	Outbounds() []Outbound
 	Outbound(tag string) (Outbound, bool)
-	DefaultOutbound(network string) Outbound
+	DefaultOutbound(network string) (Outbound, error)
 
 	FakeIPStore() FakeIPStore
 
@@ -28,6 +30,8 @@ type Router interface {
 	GeoIPReader() *geoip.Reader
 	LoadGeosite(code string) (Rule, error)
 
+	RuleSet(tag string) (RuleSet, bool)
+
 	Exchange(ctx context.Context, message *mdns.Msg) (*mdns.Msg, error)
 	Lookup(ctx context.Context, domain string, strategy dns.DomainStrategy) ([]netip.Addr, error)
 	LookupDefault(ctx context.Context, domain string) ([]netip.Addr, error)
@@ -62,11 +66,15 @@ func RouterFromContext(ctx context.Context) Router {
 	return service.FromContext[Router](ctx)
 }
 
+type HeadlessRule interface {
+	Match(metadata *InboundContext) bool
+}
+
 type Rule interface {
+	HeadlessRule
 	Service
 	Type() string
 	UpdateGeosite() error
-	Match(metadata *InboundContext) bool
 	Outbound() string
 	String() string
 }
@@ -77,6 +85,24 @@ type DNSRule interface {
 	RewriteTTL() *uint32
 }
 
+type RuleSet interface {
+	StartContext(ctx context.Context, startContext RuleSetStartContext) error
+	PostStart() error
+	Metadata() RuleSetMetadata
+	Close() error
+	HeadlessRule
+}
+
+type RuleSetMetadata struct {
+	ContainsProcessRule bool
+	ContainsWIFIRule    bool
+}
+
+type RuleSetStartContext interface {
+	HTTPClient(detour string, dialer N.Dialer) *http.Client
+	Close()
+}
+
 type InterfaceUpdateListener interface {
 	InterfaceUpdated()
 }
```
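A hedged sketch of a caller adapted to the two Router changes above: DefaultOutbound now returns (Outbound, error) instead of a possibly-nil Outbound, and rule-sets are fetched by tag and matched through the shared HeadlessRule interface. The "rule-set-example" and "proxy-out" tags are hypothetical and only serve to show the shape of the calls.

```go
package example

import (
	"github.com/sagernet/sing-box/adapter"
	N "github.com/sagernet/sing/common/network"
)

// pickOutbound routes metadata through a tagged rule-set if it is loaded,
// falling back to the (now error-returning) default TCP outbound.
func pickOutbound(router adapter.Router, metadata *adapter.InboundContext) (adapter.Outbound, error) {
	if ruleSet, loaded := router.RuleSet("rule-set-example"); loaded && ruleSet.Match(metadata) {
		if outbound, loaded := router.Outbound("proxy-out"); loaded {
			return outbound, nil
		}
	}
	return router.DefaultOutbound(N.NetworkTCP)
}
```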
box.go, 102 changed lines:

```diff
@@ -9,7 +9,10 @@ import (
 	"time"
 
 	"github.com/sagernet/sing-box/adapter"
+	"github.com/sagernet/sing-box/common/taskmonitor"
+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing-box/experimental"
+	"github.com/sagernet/sing-box/experimental/cachefile"
 	"github.com/sagernet/sing-box/experimental/libbox/platform"
 	"github.com/sagernet/sing-box/inbound"
 	"github.com/sagernet/sing-box/log"
@@ -32,7 +35,8 @@ type Box struct {
 	outbounds    []adapter.Outbound
 	logFactory   log.Factory
 	logger       log.ContextLogger
-	preServices  map[string]adapter.Service
+	preServices1 map[string]adapter.Service
+	preServices2 map[string]adapter.Service
 	postServices map[string]adapter.Service
 	done         chan struct{}
 }
@@ -45,17 +49,21 @@ type Options struct {
 }
 
 func New(options Options) (*Box, error) {
+	createdAt := time.Now()
 	ctx := options.Context
 	if ctx == nil {
 		ctx = context.Background()
 	}
 	ctx = service.ContextWithDefaultRegistry(ctx)
 	ctx = pause.ContextWithDefaultManager(ctx)
-	createdAt := time.Now()
 	experimentalOptions := common.PtrValueOrDefault(options.Experimental)
 	applyDebugOptions(common.PtrValueOrDefault(experimentalOptions.Debug))
+	var needCacheFile bool
 	var needClashAPI bool
 	var needV2RayAPI bool
+	if experimentalOptions.CacheFile != nil && experimentalOptions.CacheFile.Enabled || options.PlatformLogWriter != nil {
+		needCacheFile = true
+	}
 	if experimentalOptions.ClashAPI != nil || options.PlatformLogWriter != nil {
 		needClashAPI = true
 	}
@@ -145,8 +153,14 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "initialize platform interface")
 		}
 	}
-	preServices := make(map[string]adapter.Service)
+	preServices1 := make(map[string]adapter.Service)
+	preServices2 := make(map[string]adapter.Service)
 	postServices := make(map[string]adapter.Service)
+	if needCacheFile {
+		cacheFile := cachefile.NewCacheFile(ctx, common.PtrValueOrDefault(experimentalOptions.CacheFile))
+		preServices1["cache file"] = cacheFile
+		service.MustRegister[adapter.CacheFile](ctx, cacheFile)
+	}
 	if needClashAPI {
 		clashAPIOptions := common.PtrValueOrDefault(experimentalOptions.ClashAPI)
 		clashAPIOptions.ModeList = experimental.CalculateClashModeList(options.Options)
@@ -155,7 +169,7 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "create clash api server")
 		}
 		router.SetClashServer(clashServer)
-		preServices["clash api"] = clashServer
+		preServices2["clash api"] = clashServer
 	}
 	if needV2RayAPI {
 		v2rayServer, err := experimental.NewV2RayServer(logFactory.NewLogger("v2ray-api"), common.PtrValueOrDefault(experimentalOptions.V2RayAPI))
@@ -163,7 +177,7 @@ func New(options Options) (*Box, error) {
 			return nil, E.Cause(err, "create v2ray api server")
 		}
 		router.SetV2RayServer(v2rayServer)
-		preServices["v2ray api"] = v2rayServer
+		preServices2["v2ray api"] = v2rayServer
 	}
 	return &Box{
 		router:       router,
@@ -172,7 +186,8 @@ func New(options Options) (*Box, error) {
 		createdAt:    createdAt,
 		logFactory:   logFactory,
 		logger:       logFactory.Logger(),
-		preServices:  preServices,
+		preServices1: preServices1,
+		preServices2: preServices2,
 		postServices: postServices,
 		done:         make(chan struct{}),
 	}, nil
@@ -217,16 +232,34 @@ func (s *Box) Start() error {
 }
 
 func (s *Box) preStart() error {
-	for serviceName, service := range s.preServices {
-		if preService, isPreService := service.(adapter.PreStarter); isPreService {
-			s.logger.Trace("pre-start ", serviceName)
-			err := preService.PreStart()
-			if err != nil {
-				return E.Cause(err, "pre-starting ", serviceName)
-			}
-		}
-	}
-	err := s.startOutbounds()
+	monitor := taskmonitor.New(s.logger, C.DefaultStartTimeout)
+	monitor.Start("start logger")
+	err := s.logFactory.Start()
+	monitor.Finish()
+	if err != nil {
+		return E.Cause(err, "start logger")
+	}
+	for serviceName, service := range s.preServices1 {
+		if preService, isPreService := service.(adapter.PreStarter); isPreService {
+			monitor.Start("pre-start ", serviceName)
+			err := preService.PreStart()
+			monitor.Finish()
+			if err != nil {
+				return E.Cause(err, "pre-start ", serviceName)
+			}
+		}
+	}
+	for serviceName, service := range s.preServices2 {
+		if preService, isPreService := service.(adapter.PreStarter); isPreService {
+			monitor.Start("pre-start ", serviceName)
+			err := preService.PreStart()
+			monitor.Finish()
+			if err != nil {
+				return E.Cause(err, "pre-start ", serviceName)
+			}
+		}
+	}
+	err = s.startOutbounds()
 	if err != nil {
 		return err
 	}
@@ -238,8 +271,13 @@ func (s *Box) start() error {
 	if err != nil {
 		return err
 	}
-	for serviceName, service := range s.preServices {
-		s.logger.Trace("starting ", serviceName)
+	for serviceName, service := range s.preServices1 {
+		err = service.Start()
+		if err != nil {
+			return E.Cause(err, "start ", serviceName)
+		}
+	}
+	for serviceName, service := range s.preServices2 {
 		err = service.Start()
 		if err != nil {
 			return E.Cause(err, "start ", serviceName)
@@ -252,7 +290,6 @@ func (s *Box) start() error {
 		} else {
 			tag = in.Tag()
 		}
-		s.logger.Trace("initializing inbound/", in.Type(), "[", tag, "]")
 		err = in.Start()
 		if err != nil {
 			return E.Cause(err, "initialize inbound/", in.Type(), "[", tag, "]")
@@ -263,7 +300,6 @@ func (s *Box) start() error {
 
 func (s *Box) postStart() error {
 	for serviceName, service := range s.postServices {
-		s.logger.Trace("starting ", service)
 		err := service.Start()
 		if err != nil {
 			return E.Cause(err, "start ", serviceName)
@@ -271,14 +307,16 @@ func (s *Box) postStart() error {
 		}
 	}
 	for _, outbound := range s.outbounds {
 		if lateOutbound, isLateOutbound := outbound.(adapter.PostStarter); isLateOutbound {
-			s.logger.Trace("post-starting outbound/", outbound.Tag())
 			err := lateOutbound.PostStart()
 			if err != nil {
 				return E.Cause(err, "post-start outbound/", outbound.Tag())
 			}
 		}
 	}
-	s.logger.Trace("post-starting router")
+	err := s.router.PostStart()
+	if err != nil {
+		return E.Cause(err, "post-start router")
+	}
 	return s.router.PostStart()
 }
 
@@ -289,41 +327,53 @@ func (s *Box) Close() error {
 	default:
 		close(s.done)
 	}
+	monitor := taskmonitor.New(s.logger, C.DefaultStopTimeout)
 	var errors error
 	for serviceName, service := range s.postServices {
-		s.logger.Trace("closing ", serviceName)
+		monitor.Start("close ", serviceName)
 		errors = E.Append(errors, service.Close(), func(err error) error {
 			return E.Cause(err, "close ", serviceName)
 		})
+		monitor.Finish()
 	}
 	for i, in := range s.inbounds {
-		s.logger.Trace("closing inbound/", in.Type(), "[", i, "]")
+		monitor.Start("close inbound/", in.Type(), "[", i, "]")
 		errors = E.Append(errors, in.Close(), func(err error) error {
 			return E.Cause(err, "close inbound/", in.Type(), "[", i, "]")
 		})
+		monitor.Finish()
 	}
 	for i, out := range s.outbounds {
-		s.logger.Trace("closing outbound/", out.Type(), "[", i, "]")
+		monitor.Start("close outbound/", out.Type(), "[", i, "]")
 		errors = E.Append(errors, common.Close(out), func(err error) error {
 			return E.Cause(err, "close outbound/", out.Type(), "[", i, "]")
 		})
+		monitor.Finish()
 	}
-	s.logger.Trace("closing router")
+	monitor.Start("close router")
 	if err := common.Close(s.router); err != nil {
 		errors = E.Append(errors, err, func(err error) error {
 			return E.Cause(err, "close router")
 		})
 	}
-	for serviceName, service := range s.preServices {
-		s.logger.Trace("closing ", serviceName)
+	monitor.Finish()
+	for serviceName, service := range s.preServices1 {
+		monitor.Start("close ", serviceName)
 		errors = E.Append(errors, service.Close(), func(err error) error {
 			return E.Cause(err, "close ", serviceName)
 		})
+		monitor.Finish()
+	}
+	for serviceName, service := range s.preServices2 {
+		monitor.Start("close ", serviceName)
+		errors = E.Append(errors, service.Close(), func(err error) error {
+			return E.Cause(err, "close ", serviceName)
+		})
+		monitor.Finish()
 	}
-	s.logger.Trace("closing log factory")
 	if err := common.Close(s.logFactory); err != nil {
 		errors = E.Append(errors, err, func(err error) error {
-			return E.Cause(err, "close log factory")
+			return E.Cause(err, "close logger")
 		})
 	}
 	return errors
```
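The box.go changes above replace per-step Trace logging with a task monitor that brackets every start and close step; judging only from the calls that appear in this diff, Start records a named step, Finish clears it, and the timeout constants (C.DefaultStartTimeout, C.DefaultStopTimeout) presumably control when a hung step gets reported. A minimal sketch of the pattern under those assumptions, using only calls seen in this diff; serviceName and the start callback are stand-ins:

```go
package example

import (
	"github.com/sagernet/sing-box/common/taskmonitor"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"
)

// startStep brackets one potentially slow step with the monitor, mirroring the
// pattern box.go now uses for the logger, services, inbounds, and outbounds.
func startStep(logger log.ContextLogger, serviceName string, start func() error) error {
	monitor := taskmonitor.New(logger, C.DefaultStartTimeout)
	monitor.Start("start ", serviceName)
	err := start()
	monitor.Finish()
	if err != nil {
		return E.Cause(err, "start ", serviceName)
	}
	return nil
}
```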
```diff
@@ -4,12 +4,15 @@ import (
 	"strings"
 
 	"github.com/sagernet/sing-box/adapter"
+	"github.com/sagernet/sing-box/common/taskmonitor"
+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing/common"
 	E "github.com/sagernet/sing/common/exceptions"
 	F "github.com/sagernet/sing/common/format"
 )
 
 func (s *Box) startOutbounds() error {
+	monitor := taskmonitor.New(s.logger, C.DefaultStartTimeout)
 	outboundTags := make(map[adapter.Outbound]string)
 	outbounds := make(map[string]adapter.Outbound)
 	for i, outboundToStart := range s.outbounds {
@@ -43,8 +46,9 @@ func (s *Box) startOutbounds() error {
 		started[outboundTag] = true
 		canContinue = true
 		if starter, isStarter := outboundToStart.(common.Starter); isStarter {
-			s.logger.Trace("initializing outbound/", outboundToStart.Type(), "[", outboundTag, "]")
+			monitor.Start("initialize outbound/", outboundToStart.Type(), "[", outboundTag, "]")
 			err := starter.Start()
+			monitor.Finish()
 			if err != nil {
 				return E.Cause(err, "initialize outbound/", outboundToStart.Type(), "[", outboundTag, "]")
 			}
```
```diff
@@ -7,7 +7,6 @@ import (
 
 	"github.com/sagernet/sing-box/common/json"
 	"github.com/sagernet/sing-box/log"
-	"github.com/sagernet/sing-box/option"
 	E "github.com/sagernet/sing/common/exceptions"
 
 	"github.com/spf13/cobra"
@@ -69,41 +68,3 @@ func format() error {
 	}
 	return nil
 }
-
-func formatOne(configPath string) error {
-	configContent, err := os.ReadFile(configPath)
-	if err != nil {
-		return E.Cause(err, "read config")
-	}
-	var options option.Options
-	err = options.UnmarshalJSON(configContent)
-	if err != nil {
-		return E.Cause(err, "decode config")
-	}
-	buffer := new(bytes.Buffer)
-	encoder := json.NewEncoder(buffer)
-	encoder.SetIndent("", "  ")
-	err = encoder.Encode(options)
-	if err != nil {
-		return E.Cause(err, "encode config")
-	}
-	if !commandFormatFlagWrite {
-		os.Stdout.WriteString(buffer.String() + "\n")
-		return nil
-	}
-	if bytes.Equal(configContent, buffer.Bytes()) {
-		return nil
-	}
-	output, err := os.Create(configPath)
-	if err != nil {
-		return E.Cause(err, "open output")
-	}
-	_, err = output.Write(buffer.Bytes())
-	output.Close()
-	if err != nil {
-		return E.Cause(err, "write output")
-	}
-	outputPath, _ := filepath.Abs(configPath)
-	os.Stderr.WriteString(outputPath + "\n")
-	return nil
-}
```
cmd/sing-box/cmd_geoip.go (new file, 43 lines):

```go
package main

import (
	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/oschwald/maxminddb-golang"
	"github.com/spf13/cobra"
)

var (
	geoipReader          *maxminddb.Reader
	commandGeoIPFlagFile string
)

var commandGeoip = &cobra.Command{
	Use:   "geoip",
	Short: "GeoIP tools",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		err := geoipPreRun()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.PersistentFlags().StringVarP(&commandGeoIPFlagFile, "file", "f", "geoip.db", "geoip file")
	mainCommand.AddCommand(commandGeoip)
}

func geoipPreRun() error {
	reader, err := maxminddb.Open(commandGeoIPFlagFile)
	if err != nil {
		return err
	}
	if reader.Metadata.DatabaseType != "sing-geoip" {
		reader.Close()
		return E.New("incorrect database type, expected sing-geoip, got ", reader.Metadata.DatabaseType)
	}
	geoipReader = reader
	return nil
}
```
cmd/sing-box/cmd_geoip_export.go (new file, 98 lines):

```go
package main

import (
	"io"
	"net"
	"os"
	"strings"

	"github.com/sagernet/sing-box/common/json"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/oschwald/maxminddb-golang"
	"github.com/spf13/cobra"
)

var flagGeoipExportOutput string

const flagGeoipExportDefaultOutput = "geoip-<country>.srs"

var commandGeoipExport = &cobra.Command{
	Use:   "export <country>",
	Short: "Export geoip country as rule-set",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geoipExport(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoipExport.Flags().StringVarP(&flagGeoipExportOutput, "output", "o", flagGeoipExportDefaultOutput, "Output path")
	commandGeoip.AddCommand(commandGeoipExport)
}

func geoipExport(countryCode string) error {
	networks := geoipReader.Networks(maxminddb.SkipAliasedNetworks)
	countryMap := make(map[string][]*net.IPNet)
	var (
		ipNet           *net.IPNet
		nextCountryCode string
		err             error
	)
	for networks.Next() {
		ipNet, err = networks.Network(&nextCountryCode)
		if err != nil {
			return err
		}
		countryMap[nextCountryCode] = append(countryMap[nextCountryCode], ipNet)
	}
	ipNets := countryMap[strings.ToLower(countryCode)]
	if len(ipNets) == 0 {
		return E.New("country code not found: ", countryCode)
	}

	var (
		outputFile   *os.File
		outputWriter io.Writer
	)
	if flagGeoipExportOutput == "stdout" {
		outputWriter = os.Stdout
	} else if flagGeoipExportOutput == flagGeoipExportDefaultOutput {
		outputFile, err = os.Create("geoip-" + countryCode + ".json")
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	} else {
		outputFile, err = os.Create(flagGeoipExportOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	}

	encoder := json.NewEncoder(outputWriter)
	encoder.SetIndent("", "  ")
	var headlessRule option.DefaultHeadlessRule
	headlessRule.IPCIDR = make([]string, 0, len(ipNets))
	for _, cidr := range ipNets {
		headlessRule.IPCIDR = append(headlessRule.IPCIDR, cidr.String())
	}
	var plainRuleSet option.PlainRuleSetCompat
	plainRuleSet.Version = C.RuleSetVersion1
	plainRuleSet.Options.Rules = []option.HeadlessRule{
		{
			Type:           C.RuleTypeDefault,
			DefaultOptions: headlessRule,
		},
	}
	return encoder.Encode(plainRuleSet)
}
```
cmd/sing-box/cmd_geoip_list.go (new file, 31 lines):

```go
package main

import (
	"os"

	"github.com/sagernet/sing-box/log"

	"github.com/spf13/cobra"
)

var commandGeoipList = &cobra.Command{
	Use:   "list",
	Short: "List geoip country codes",
	Run: func(cmd *cobra.Command, args []string) {
		err := listGeoip()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.AddCommand(commandGeoipList)
}

func listGeoip() error {
	for _, code := range geoipReader.Metadata.Languages {
		os.Stdout.WriteString(code + "\n")
	}
	return nil
}
```
cmd/sing-box/cmd_geoip_lookup.go (new file, 47 lines):

```go
package main

import (
	"net/netip"
	"os"

	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"
	N "github.com/sagernet/sing/common/network"

	"github.com/spf13/cobra"
)

var commandGeoipLookup = &cobra.Command{
	Use:   "lookup <address>",
	Short: "Lookup if an IP address is contained in the GeoIP database",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geoipLookup(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoip.AddCommand(commandGeoipLookup)
}

func geoipLookup(address string) error {
	addr, err := netip.ParseAddr(address)
	if err != nil {
		return E.Cause(err, "parse address")
	}
	if !N.IsPublicAddr(addr) {
		os.Stdout.WriteString("private\n")
		return nil
	}
	var code string
	_ = geoipReader.Lookup(addr.AsSlice(), &code)
	if code != "" {
		os.Stdout.WriteString(code + "\n")
		return nil
	}
	os.Stdout.WriteString("unknown\n")
	return nil
}
```
cmd/sing-box/cmd_geosite.go (new file, 41 lines):

```go
package main

import (
	"github.com/sagernet/sing-box/common/geosite"
	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var (
	commandGeoSiteFlagFile string
	geositeReader          *geosite.Reader
	geositeCodeList        []string
)

var commandGeoSite = &cobra.Command{
	Use:   "geosite",
	Short: "Geosite tools",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		err := geositePreRun()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.PersistentFlags().StringVarP(&commandGeoSiteFlagFile, "file", "f", "geosite.db", "geosite file")
	mainCommand.AddCommand(commandGeoSite)
}

func geositePreRun() error {
	reader, codeList, err := geosite.Open(commandGeoSiteFlagFile)
	if err != nil {
		return E.Cause(err, "open geosite file")
	}
	geositeReader = reader
	geositeCodeList = codeList
	return nil
}
```
cmd/sing-box/cmd_geosite_export.go (new file, 81 lines):

```go
package main

import (
	"io"
	"os"

	"github.com/sagernet/sing-box/common/geosite"
	"github.com/sagernet/sing-box/common/json"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"

	"github.com/spf13/cobra"
)

var commandGeositeExportOutput string

const commandGeositeExportDefaultOutput = "geosite-<category>.json"

var commandGeositeExport = &cobra.Command{
	Use:   "export <category>",
	Short: "Export geosite category as rule-set",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := geositeExport(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeositeExport.Flags().StringVarP(&commandGeositeExportOutput, "output", "o", commandGeositeExportDefaultOutput, "Output path")
	commandGeoSite.AddCommand(commandGeositeExport)
}

func geositeExport(category string) error {
	sourceSet, err := geositeReader.Read(category)
	if err != nil {
		return err
	}
	var (
		outputFile   *os.File
		outputWriter io.Writer
	)
	if commandGeositeExportOutput == "stdout" {
		outputWriter = os.Stdout
	} else if commandGeositeExportOutput == commandGeositeExportDefaultOutput {
		outputFile, err = os.Create("geosite-" + category + ".json")
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	} else {
		outputFile, err = os.Create(commandGeositeExportOutput)
		if err != nil {
			return err
		}
		defer outputFile.Close()
		outputWriter = outputFile
	}

	encoder := json.NewEncoder(outputWriter)
	encoder.SetIndent("", "  ")
	var headlessRule option.DefaultHeadlessRule
	defaultRule := geosite.Compile(sourceSet)
	headlessRule.Domain = defaultRule.Domain
	headlessRule.DomainSuffix = defaultRule.DomainSuffix
	headlessRule.DomainKeyword = defaultRule.DomainKeyword
	headlessRule.DomainRegex = defaultRule.DomainRegex
	var plainRuleSet option.PlainRuleSetCompat
	plainRuleSet.Version = C.RuleSetVersion1
	plainRuleSet.Options.Rules = []option.HeadlessRule{
		{
			Type:           C.RuleTypeDefault,
			DefaultOptions: headlessRule,
		},
	}
	return encoder.Encode(plainRuleSet)
}
```
cmd/sing-box/cmd_geosite_list.go (new file, 50 lines):

```go
package main

import (
	"os"
	"sort"

	"github.com/sagernet/sing-box/log"
	F "github.com/sagernet/sing/common/format"

	"github.com/spf13/cobra"
)

var commandGeositeList = &cobra.Command{
	Use:   "list <category>",
	Short: "List geosite categories",
	Run: func(cmd *cobra.Command, args []string) {
		err := geositeList()
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.AddCommand(commandGeositeList)
}

func geositeList() error {
	var geositeEntry []struct {
		category string
		items    int
	}
	for _, category := range geositeCodeList {
		sourceSet, err := geositeReader.Read(category)
		if err != nil {
			return err
		}
		geositeEntry = append(geositeEntry, struct {
			category string
			items    int
		}{category, len(sourceSet)})
	}
	sort.SliceStable(geositeEntry, func(i, j int) bool {
		return geositeEntry[i].items < geositeEntry[j].items
	})
	for _, entry := range geositeEntry {
		os.Stdout.WriteString(F.ToString(entry.category, " (", entry.items, ")\n"))
	}
	return nil
}
```
cmd/sing-box/cmd_geosite_lookup.go (new file, 97 lines):

```go
package main

import (
	"os"
	"sort"

	"github.com/sagernet/sing-box/log"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var commandGeositeLookup = &cobra.Command{
	Use:   "lookup [category] <domain>",
	Short: "Check if a domain is in the geosite",
	Args:  cobra.RangeArgs(1, 2),
	Run: func(cmd *cobra.Command, args []string) {
		var (
			source string
			target string
		)
		switch len(args) {
		case 1:
			target = args[0]
		case 2:
			source = args[0]
			target = args[1]
		}
		err := geositeLookup(source, target)
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandGeoSite.AddCommand(commandGeositeLookup)
}

func geositeLookup(source string, target string) error {
	var sourceMatcherList []struct {
		code    string
		matcher *searchGeositeMatcher
	}
	if source != "" {
		sourceSet, err := geositeReader.Read(source)
		if err != nil {
			return err
		}
		sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
		if err != nil {
			return E.Cause(err, "compile code: "+source)
		}
		sourceMatcherList = []struct {
			code    string
			matcher *searchGeositeMatcher
		}{
			{
				code:    source,
				matcher: sourceMatcher,
			},
		}
	} else {
		for _, code := range geositeCodeList {
			sourceSet, err := geositeReader.Read(code)
			if err != nil {
				return err
			}
			sourceMatcher, err := newSearchGeositeMatcher(sourceSet)
			if err != nil {
				return E.Cause(err, "compile code: "+code)
			}
			sourceMatcherList = append(sourceMatcherList, struct {
				code    string
				matcher *searchGeositeMatcher
			}{
				code:    code,
				matcher: sourceMatcher,
			})
		}
	}
	sort.SliceStable(sourceMatcherList, func(i, j int) bool {
		return sourceMatcherList[i].code < sourceMatcherList[j].code
	})

	for _, matcherItem := range sourceMatcherList {
		if matchRule := matcherItem.matcher.Match(target); matchRule != "" {
			os.Stdout.WriteString("Match code (")
			os.Stdout.WriteString(matcherItem.code)
			os.Stdout.WriteString(") ")
			os.Stdout.WriteString(matchRule)
			os.Stdout.WriteString("\n")
		}
	}
	return nil
}
```
cmd/sing-box/cmd_geosite_matcher.go (new file, 56 lines):

```go
package main

import (
	"regexp"
	"strings"

	"github.com/sagernet/sing-box/common/geosite"
)

type searchGeositeMatcher struct {
	domainMap   map[string]bool
	suffixList  []string
	keywordList []string
	regexList   []string
}

func newSearchGeositeMatcher(items []geosite.Item) (*searchGeositeMatcher, error) {
	options := geosite.Compile(items)
	domainMap := make(map[string]bool)
	for _, domain := range options.Domain {
		domainMap[domain] = true
	}
	rule := &searchGeositeMatcher{
		domainMap:   domainMap,
		suffixList:  options.DomainSuffix,
		keywordList: options.DomainKeyword,
		regexList:   options.DomainRegex,
	}
	return rule, nil
}

func (r *searchGeositeMatcher) Match(domain string) string {
	if r.domainMap[domain] {
		return "domain=" + domain
	}
	for _, suffix := range r.suffixList {
		if strings.HasSuffix(domain, suffix) {
			return "domain_suffix=" + suffix
		}
	}
	for _, keyword := range r.keywordList {
		if strings.Contains(domain, keyword) {
			return "domain_keyword=" + keyword
		}
	}
	for _, regexStr := range r.regexList {
		regex, err := regexp.Compile(regexStr)
		if err != nil {
			continue
		}
		if regex.MatchString(domain) {
			return "domain_regex=" + regexStr
		}
	}
	return ""
}
```
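A short sketch of how the matcher above is meant to be driven, the same flow geositeLookup uses: read one category from the geosite database, compile it into a searchGeositeMatcher, and test a single domain. It assumes it sits in the same package as cmd_geosite_matcher.go; the category and domain arguments are whatever the caller supplies.

```go
package main

import (
	"github.com/sagernet/sing-box/common/geosite"
)

// lookupOnce compiles one geosite category into a searchGeositeMatcher and
// tests a single domain against it, returning which rule matched (for example
// "domain_suffix=<suffix>") or "" when nothing matched.
func lookupOnce(reader *geosite.Reader, category string, domain string) (string, error) {
	items, err := reader.Read(category)
	if err != nil {
		return "", err
	}
	matcher, err := newSearchGeositeMatcher(items)
	if err != nil {
		return "", err
	}
	return matcher.Match(domain), nil
}
```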
```diff
@@ -18,7 +18,7 @@ import (
 )
 
 var commandMerge = &cobra.Command{
-	Use:   "merge [output]",
+	Use:   "merge <output>",
 	Short: "Merge configurations",
 	Run: func(cmd *cobra.Command, args []string) {
 		err := merge(args[0])
```
cmd/sing-box/cmd_rule_set.go (new file, 14 lines):

```go
package main

import (
	"github.com/spf13/cobra"
)

var commandRuleSet = &cobra.Command{
	Use:   "rule-set",
	Short: "Manage rule sets",
}

func init() {
	mainCommand.AddCommand(commandRuleSet)
}
```
cmd/sing-box/cmd_rule_set_compile.go (new file, 80 lines):

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/common/srs"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"

	"github.com/spf13/cobra"
)

var flagRuleSetCompileOutput string

const flagRuleSetCompileDefaultOutput = "<file_name>.srs"

var commandRuleSetCompile = &cobra.Command{
	Use:   "compile [source-path]",
	Short: "Compile rule-set json to binary",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := compileRuleSet(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandRuleSet.AddCommand(commandRuleSetCompile)
	commandRuleSetCompile.Flags().StringVarP(&flagRuleSetCompileOutput, "output", "o", flagRuleSetCompileDefaultOutput, "Output file")
}

func compileRuleSet(sourcePath string) error {
	var (
		reader io.Reader
		err    error
	)
	if sourcePath == "stdin" {
		reader = os.Stdin
	} else {
		reader, err = os.Open(sourcePath)
		if err != nil {
			return err
		}
	}
	decoder := json.NewDecoder(json.NewCommentFilter(reader))
	decoder.DisallowUnknownFields()
	var plainRuleSet option.PlainRuleSetCompat
	err = decoder.Decode(&plainRuleSet)
	if err != nil {
		return err
	}
	ruleSet := plainRuleSet.Upgrade()
	var outputPath string
	if flagRuleSetCompileOutput == flagRuleSetCompileDefaultOutput {
		if strings.HasSuffix(sourcePath, ".json") {
			outputPath = sourcePath[:len(sourcePath)-5] + ".srs"
		} else {
			outputPath = sourcePath + ".srs"
		}
	} else {
		outputPath = flagRuleSetCompileOutput
	}
	outputFile, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	err = srs.Write(outputFile, ruleSet)
	if err != nil {
		outputFile.Close()
		os.Remove(outputPath)
		return err
	}
	outputFile.Close()
	return nil
}
```
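The compile command above decodes a PlainRuleSetCompat, upgrades it, and writes the binary form with srs.Write. A hedged sketch that builds the same structure directly in code, using only the types and constants the export commands in this diff already use; the domain suffix and output path are placeholders:

```go
package main

import (
	"os"

	"github.com/sagernet/sing-box/common/srs"
	C "github.com/sagernet/sing-box/constant"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"
)

func main() {
	// One headless rule with a single placeholder domain suffix.
	var headlessRule option.DefaultHeadlessRule
	headlessRule.DomainSuffix = []string{".example.org"}

	// Wrap it in the versioned container the compile/export commands use.
	var plainRuleSet option.PlainRuleSetCompat
	plainRuleSet.Version = C.RuleSetVersion1
	plainRuleSet.Options.Rules = []option.HeadlessRule{
		{
			Type:           C.RuleTypeDefault,
			DefaultOptions: headlessRule,
		},
	}

	// Write the binary rule-set, mirroring compileRuleSet above.
	outputFile, err := os.Create("example.srs") // placeholder output path
	if err != nil {
		log.Fatal(err)
	}
	defer outputFile.Close()
	if err := srs.Write(outputFile, plainRuleSet.Upgrade()); err != nil {
		log.Fatal(err)
	}
}
```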
cmd/sing-box/cmd_rule_set_format.go (new file, 87 lines):

```go
package main

import (
	"bytes"
	"io"
	"os"
	"path/filepath"

	"github.com/sagernet/sing-box/common/json"
	"github.com/sagernet/sing-box/log"
	"github.com/sagernet/sing-box/option"
	E "github.com/sagernet/sing/common/exceptions"

	"github.com/spf13/cobra"
)

var commandRuleSetFormatFlagWrite bool

var commandRuleSetFormat = &cobra.Command{
	Use:   "format <source-path>",
	Short: "Format rule-set json",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		err := formatRuleSet(args[0])
		if err != nil {
			log.Fatal(err)
		}
	},
}

func init() {
	commandRuleSetFormat.Flags().BoolVarP(&commandRuleSetFormatFlagWrite, "write", "w", false, "write result to (source) file instead of stdout")
	commandRuleSet.AddCommand(commandRuleSetFormat)
}

func formatRuleSet(sourcePath string) error {
	var (
		reader io.Reader
		err    error
	)
	if sourcePath == "stdin" {
		reader = os.Stdin
	} else {
		reader, err = os.Open(sourcePath)
		if err != nil {
			return err
		}
	}
	content, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	decoder := json.NewDecoder(json.NewCommentFilter(bytes.NewReader(content)))
	decoder.DisallowUnknownFields()
	var plainRuleSet option.PlainRuleSetCompat
	err = decoder.Decode(&plainRuleSet)
	if err != nil {
		return err
	}
	ruleSet := plainRuleSet.Upgrade()
	buffer := new(bytes.Buffer)
	encoder := json.NewEncoder(buffer)
	encoder.SetIndent("", "  ")
	err = encoder.Encode(ruleSet)
	if err != nil {
		return E.Cause(err, "encode config")
	}
	outputPath, _ := filepath.Abs(sourcePath)
	if !commandRuleSetFormatFlagWrite || sourcePath == "stdin" {
		os.Stdout.WriteString(buffer.String() + "\n")
		return nil
	}
	if bytes.Equal(content, buffer.Bytes()) {
		return nil
	}
	output, err := os.Create(sourcePath)
	if err != nil {
		return E.Cause(err, "open output")
	}
	_, err = output.Write(buffer.Bytes())
	output.Close()
	if err != nil {
		return E.Cause(err, "write output")
	}
	os.Stderr.WriteString(outputPath + "\n")
	return nil
}
```
```diff
@@ -14,6 +14,7 @@ import (
 
 	"github.com/sagernet/sing-box"
 	"github.com/sagernet/sing-box/common/badjsonmerge"
+	C "github.com/sagernet/sing-box/constant"
 	"github.com/sagernet/sing-box/log"
 	"github.com/sagernet/sing-box/option"
 	E "github.com/sagernet/sing/common/exceptions"
@@ -193,7 +194,7 @@ func run() error {
 }
 
 func closeMonitor(ctx context.Context) {
-	time.Sleep(3 * time.Second)
+	time.Sleep(C.DefaultStopFatalTimeout)
 	select {
 	case <-ctx.Done():
 		return
```
```diff
@@ -38,11 +38,7 @@ func createPreStartedClient() (*box.Box, error) {
 
 func createDialer(instance *box.Box, network string, outboundTag string) (N.Dialer, error) {
 	if outboundTag == "" {
-		outbound := instance.Router().DefaultOutbound(N.NetworkName(network))
-		if outbound == nil {
-			return nil, E.New("missing default outbound")
-		}
-		return outbound, nil
+		return instance.Router().DefaultOutbound(N.NetworkName(network))
 	} else {
 		outbound, loaded := instance.Router().Outbound(outboundTag)
 		if !loaded {
```
```diff
@@ -18,7 +18,7 @@ import (
 var commandConnectFlagNetwork string
 
 var commandConnect = &cobra.Command{
-	Use:   "connect [address]",
+	Use:   "connect <address>",
 	Short: "Connect to an address",
 	Args:  cobra.ExactArgs(1),
 	Run: func(cmd *cobra.Command, args []string) {
```
```diff
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"time"
 
@@ -37,7 +38,7 @@ func main() {
 
 func preRun(cmd *cobra.Command, args []string) {
 	if disableColor {
-		log.SetStdLogger(log.NewFactory(log.Formatter{BaseTime: time.Now(), DisableColors: true}, os.Stderr, nil).Logger())
+		log.SetStdLogger(log.NewDefaultFactory(context.Background(), log.Formatter{BaseTime: time.Now(), DisableColors: true}, os.Stderr, "", nil, false).Logger())
 	}
 	if workingDir != "" {
 		_, err := os.Stat(workingDir)
```
```diff
@@ -1,10 +1,10 @@
 package badjsonmerge
 
 import (
-	"encoding/json"
 	"reflect"
 
 	"github.com/sagernet/sing-box/common/badjson"
+	"github.com/sagernet/sing-box/common/json"
 	"github.com/sagernet/sing-box/option"
 	E "github.com/sagernet/sing/common/exceptions"
 )
```
common/contextjson/README.md (new file, 3 lines):

```
# contextjson

mod from go1.21.4
```
common/contextjson/decode.go (new file, 1325 lines): diff suppressed because it is too large.
49  common/contextjson/decode_context.go  Normal file
@@ -0,0 +1,49 @@
package json

import "strconv"

type decodeContext struct {
    parent *decodeContext
    index  int
    key    string
}

func (d *decodeState) formatContext() string {
    var description string
    context := d.context
    var appendDot bool
    for context != nil {
        if appendDot {
            description = "." + description
        }
        if context.key != "" {
            description = context.key + description
            appendDot = true
        } else {
            description = "[" + strconv.Itoa(context.index) + "]" + description
            appendDot = false
        }
        context = context.parent
    }
    return description
}

type contextError struct {
    parent  error
    context string
    index   bool
}

func (c *contextError) Unwrap() error {
    return c.parent
}

func (c *contextError) Error() string {
    //goland:noinspection GoTypeAssertionOnErrors
    switch c.parent.(type) {
    case *contextError:
        return c.context + "." + c.parent.Error()
    default:
        return c.context + ": " + c.parent.Error()
    }
}
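For orientation, a small hypothetical sketch of what the context tracking above is for (the struct and input are invented, and the exact error wording comes from the modified decode.go, whose diff is suppressed on this page): decode failures are expected to be wrapped so that the reported error names the offending path, built by formatContext from the decodeContext chain.

package main

import (
    "fmt"

    json "github.com/sagernet/sing-box/common/contextjson"
)

type inbound struct {
    Listen struct {
        Port int `json:"port"`
    } `json:"listen"`
}

func main() {
    var v inbound
    // "port" is a string here, so decoding fails; with the context tracking
    // above, the error is expected to point at the failing path (listen.port).
    err := json.Unmarshal([]byte(`{"listen": {"port": "not-a-number"}}`), &v)
    fmt.Println(err)
}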
1283  common/contextjson/encode.go  Normal file
File diff suppressed because it is too large.
48  common/contextjson/fold.go  Normal file
@@ -0,0 +1,48 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
    "unicode"
    "unicode/utf8"
)

// foldName returns a folded string such that foldName(x) == foldName(y)
// is identical to bytes.EqualFold(x, y).
func foldName(in []byte) []byte {
    // This is inlinable to take advantage of "function outlining".
    var arr [32]byte // large enough for most JSON names
    return appendFoldedName(arr[:0], in)
}

func appendFoldedName(out, in []byte) []byte {
    for i := 0; i < len(in); {
        // Handle single-byte ASCII.
        if c := in[i]; c < utf8.RuneSelf {
            if 'a' <= c && c <= 'z' {
                c -= 'a' - 'A'
            }
            out = append(out, c)
            i++
            continue
        }
        // Handle multi-byte Unicode.
        r, n := utf8.DecodeRune(in[i:])
        out = utf8.AppendRune(out, foldRune(r))
        i += n
    }
    return out
}

// foldRune is returns the smallest rune for all runes in the same fold set.
func foldRune(r rune) rune {
    for {
        r2 := unicode.SimpleFold(r)
        if r2 <= r {
            return r2
        }
        r = r2
    }
}
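fold.go backs case-insensitive matching of JSON object keys to struct fields during decoding. The file appears to be an unmodified copy of the standard library helper, so its observable behaviour can be illustrated with encoding/json (type and input invented for the example):

package main

import (
    "encoding/json"
    "fmt"
)

type ruleSet struct {
    Tag string `json:"tag"`
}

func main() {
    var v ruleSet
    // The key "TAG" still populates the "tag" field: when no exact match
    // exists, field names are matched case-insensitively via the folding helpers.
    _ = json.Unmarshal([]byte(`{"TAG": "geoip-cn"}`), &v)
    fmt.Println(v.Tag) // geoip-cn
}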
174  common/contextjson/indent.go  Normal file
@@ -0,0 +1,174 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import "bytes"

// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
    dst.Grow(len(src))
    dst.Write(appendHTMLEscape(dst.AvailableBuffer(), src))
}

func appendHTMLEscape(dst, src []byte) []byte {
    // The characters can only appear in string literals,
    // so just scan the string one byte at a time.
    start := 0
    for i, c := range src {
        if c == '<' || c == '>' || c == '&' {
            dst = append(dst, src[start:i]...)
            dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
            start = i + 1
        }
        // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
        if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
            dst = append(dst, src[start:i]...)
            dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
            start = i + len("\u2029")
        }
    }
    return append(dst, src[start:]...)
}

// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
    dst.Grow(len(src))
    b := dst.AvailableBuffer()
    b, err := appendCompact(b, src, false)
    dst.Write(b)
    return err
}

func appendCompact(dst, src []byte, escape bool) ([]byte, error) {
    origLen := len(dst)
    scan := newScanner()
    defer freeScanner(scan)
    start := 0
    for i, c := range src {
        if escape && (c == '<' || c == '>' || c == '&') {
            dst = append(dst, src[start:i]...)
            dst = append(dst, '\\', 'u', '0', '0', hex[c>>4], hex[c&0xF])
            start = i + 1
        }
        // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
        if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
            dst = append(dst, src[start:i]...)
            dst = append(dst, '\\', 'u', '2', '0', '2', hex[src[i+2]&0xF])
            start = i + len("\u2029")
        }
        v := scan.step(scan, c)
        if v >= scanSkipSpace {
            if v == scanError {
                break
            }
            dst = append(dst, src[start:i]...)
            start = i + 1
        }
    }
    if scan.eof() == scanError {
        return dst[:origLen], scan.err
    }
    dst = append(dst, src[start:]...)
    return dst, nil
}

func appendNewline(dst []byte, prefix, indent string, depth int) []byte {
    dst = append(dst, '\n')
    dst = append(dst, prefix...)
    for i := 0; i < depth; i++ {
        dst = append(dst, indent...)
    }
    return dst
}

// indentGrowthFactor specifies the growth factor of indenting JSON input.
// Empirically, the growth factor was measured to be between 1.4x to 1.8x
// for some set of compacted JSON with the indent being a single tab.
// Specify a growth factor slightly larger than what is observed
// to reduce probability of allocation in appendIndent.
// A factor no higher than 2 ensures that wasted space never exceeds 50%.
const indentGrowthFactor = 2

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
    dst.Grow(indentGrowthFactor * len(src))
    b := dst.AvailableBuffer()
    b, err := appendIndent(b, src, prefix, indent)
    dst.Write(b)
    return err
}

func appendIndent(dst, src []byte, prefix, indent string) ([]byte, error) {
    origLen := len(dst)
    scan := newScanner()
    defer freeScanner(scan)
    needIndent := false
    depth := 0
    for _, c := range src {
        scan.bytes++
        v := scan.step(scan, c)
        if v == scanSkipSpace {
            continue
        }
        if v == scanError {
            break
        }
        if needIndent && v != scanEndObject && v != scanEndArray {
            needIndent = false
            depth++
            dst = appendNewline(dst, prefix, indent, depth)
        }

        // Emit semantically uninteresting bytes
        // (in particular, punctuation in strings) unmodified.
        if v == scanContinue {
            dst = append(dst, c)
            continue
        }

        // Add spacing around real punctuation.
        switch c {
        case '{', '[':
            // delay indent so that empty object and array are formatted as {} and [].
            needIndent = true
            dst = append(dst, c)
        case ',':
            dst = append(dst, c)
            dst = appendNewline(dst, prefix, indent, depth)
        case ':':
            dst = append(dst, c, ' ')
        case '}', ']':
            if needIndent {
                // suppress indent in empty object/array
                needIndent = false
            } else {
                depth--
                dst = appendNewline(dst, prefix, indent, depth)
            }
            dst = append(dst, c)
        default:
            dst = append(dst, c)
        }
    }
    if scan.eof() == scanError {
        return dst[:origLen], scan.err
    }
    return dst, nil
}
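indent.go likewise appears to be carried over unchanged from go1.21.4, so Compact, Indent and HTMLEscape behave like their encoding/json counterparts; a quick illustration of Indent (input invented for the example):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

func main() {
    var out bytes.Buffer
    // Two-space indentation, no prefix; empty objects and arrays stay on one line.
    if err := json.Indent(&out, []byte(`{"a":[1,2],"b":{}}`), "", "  "); err != nil {
        panic(err)
    }
    fmt.Println(out.String())
}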
610  common/contextjson/scanner.go  Normal file
@@ -0,0 +1,610 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
// JSON value parser state machine.
|
||||||
|
// Just about at the limit of what is reasonable to write by hand.
|
||||||
|
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||||
|
// otherwise common code from the multiple scanning functions
|
||||||
|
// in this package (Compact, Indent, checkValid, etc).
|
||||||
|
//
|
||||||
|
// This file starts with two simple examples using the scanner
|
||||||
|
// before diving into the scanner itself.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Valid reports whether data is a valid JSON encoding.
|
||||||
|
func Valid(data []byte) bool {
|
||||||
|
scan := newScanner()
|
||||||
|
defer freeScanner(scan)
|
||||||
|
return checkValid(data, scan) == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkValid verifies that data is valid JSON-encoded data.
|
||||||
|
// scan is passed in for use by checkValid to avoid an allocation.
|
||||||
|
// checkValid returns nil or a SyntaxError.
|
||||||
|
func checkValid(data []byte, scan *scanner) error {
|
||||||
|
scan.reset()
|
||||||
|
for _, c := range data {
|
||||||
|
scan.bytes++
|
||||||
|
if scan.step(scan, c) == scanError {
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if scan.eof() == scanError {
|
||||||
|
return scan.err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A SyntaxError is a description of a JSON syntax error.
|
||||||
|
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
|
||||||
|
type SyntaxError struct {
|
||||||
|
msg string // description of error
|
||||||
|
Offset int64 // error occurred after reading Offset bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *SyntaxError) Error() string { return e.msg }
|
||||||
|
|
||||||
|
// A scanner is a JSON scanning state machine.
|
||||||
|
// Callers call scan.reset and then pass bytes in one at a time
|
||||||
|
// by calling scan.step(&scan, c) for each byte.
|
||||||
|
// The return value, referred to as an opcode, tells the
|
||||||
|
// caller about significant parsing events like beginning
|
||||||
|
// and ending literals, objects, and arrays, so that the
|
||||||
|
// caller can follow along if it wishes.
|
||||||
|
// The return value scanEnd indicates that a single top-level
|
||||||
|
// JSON value has been completed, *before* the byte that
|
||||||
|
// just got passed in. (The indication must be delayed in order
|
||||||
|
// to recognize the end of numbers: is 123 a whole value or
|
||||||
|
// the beginning of 12345e+6?).
|
||||||
|
type scanner struct {
|
||||||
|
// The step is a func to be called to execute the next transition.
|
||||||
|
// Also tried using an integer constant and a single func
|
||||||
|
// with a switch, but using the func directly was 10% faster
|
||||||
|
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||||
|
step func(*scanner, byte) int
|
||||||
|
|
||||||
|
// Reached end of top-level value.
|
||||||
|
endTop bool
|
||||||
|
|
||||||
|
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||||
|
parseState []int
|
||||||
|
|
||||||
|
// Error that happened, if any.
|
||||||
|
err error
|
||||||
|
|
||||||
|
// total bytes consumed, updated by decoder.Decode (and deliberately
|
||||||
|
// not set to zero by scan.reset)
|
||||||
|
bytes int64
|
||||||
|
}
|
||||||
|
|
||||||
|
var scannerPool = sync.Pool{
|
||||||
|
New: func() any {
|
||||||
|
return &scanner{}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func newScanner() *scanner {
|
||||||
|
scan := scannerPool.Get().(*scanner)
|
||||||
|
// scan.reset by design doesn't set bytes to zero
|
||||||
|
scan.bytes = 0
|
||||||
|
scan.reset()
|
||||||
|
return scan
|
||||||
|
}
|
||||||
|
|
||||||
|
func freeScanner(scan *scanner) {
|
||||||
|
// Avoid hanging on to too much memory in extreme cases.
|
||||||
|
if len(scan.parseState) > 1024 {
|
||||||
|
scan.parseState = nil
|
||||||
|
}
|
||||||
|
scannerPool.Put(scan)
|
||||||
|
}
|
||||||
|
|
||||||
|
// These values are returned by the state transition functions
|
||||||
|
// assigned to scanner.state and the method scanner.eof.
|
||||||
|
// They give details about the current state of the scan that
|
||||||
|
// callers might be interested to know about.
|
||||||
|
// It is okay to ignore the return value of any particular
|
||||||
|
// call to scanner.state: if one call returns scanError,
|
||||||
|
// every subsequent call will return scanError too.
|
||||||
|
const (
|
||||||
|
// Continue.
|
||||||
|
scanContinue = iota // uninteresting byte
|
||||||
|
scanBeginLiteral // end implied by next result != scanContinue
|
||||||
|
scanBeginObject // begin object
|
||||||
|
scanObjectKey // just finished object key (string)
|
||||||
|
scanObjectValue // just finished non-last object value
|
||||||
|
scanEndObject // end object (implies scanObjectValue if possible)
|
||||||
|
scanBeginArray // begin array
|
||||||
|
scanArrayValue // just finished array value
|
||||||
|
scanEndArray // end array (implies scanArrayValue if possible)
|
||||||
|
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||||
|
|
||||||
|
// Stop.
|
||||||
|
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||||
|
scanError // hit an error, scanner.err.
|
||||||
|
)
|
||||||
|
|
||||||
|
// These values are stored in the parseState stack.
|
||||||
|
// They give the current state of a composite value
|
||||||
|
// being scanned. If the parser is inside a nested value
|
||||||
|
// the parseState describes the nested state, outermost at entry 0.
|
||||||
|
const (
|
||||||
|
parseObjectKey = iota // parsing object key (before colon)
|
||||||
|
parseObjectValue // parsing object value (after colon)
|
||||||
|
parseArrayValue // parsing array value
|
||||||
|
)
|
||||||
|
|
||||||
|
// This limits the max nesting depth to prevent stack overflow.
|
||||||
|
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
|
||||||
|
const maxNestingDepth = 10000
|
||||||
|
|
||||||
|
// reset prepares the scanner for use.
|
||||||
|
// It must be called before calling s.step.
|
||||||
|
func (s *scanner) reset() {
|
||||||
|
s.step = stateBeginValue
|
||||||
|
s.parseState = s.parseState[0:0]
|
||||||
|
s.err = nil
|
||||||
|
s.endTop = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// eof tells the scanner that the end of input has been reached.
|
||||||
|
// It returns a scan status just as s.step does.
|
||||||
|
func (s *scanner) eof() int {
|
||||||
|
if s.err != nil {
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
if s.endTop {
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
s.step(s, ' ')
|
||||||
|
if s.endTop {
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
if s.err == nil {
|
||||||
|
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
||||||
|
}
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushParseState pushes a new parse state p onto the parse stack.
|
||||||
|
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
|
||||||
|
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
|
||||||
|
s.parseState = append(s.parseState, newParseState)
|
||||||
|
if len(s.parseState) <= maxNestingDepth {
|
||||||
|
return successState
|
||||||
|
}
|
||||||
|
return s.error(c, "exceeded max depth")
|
||||||
|
}
|
||||||
|
|
||||||
|
// popParseState pops a parse state (already obtained) off the stack
|
||||||
|
// and updates s.step accordingly.
|
||||||
|
func (s *scanner) popParseState() {
|
||||||
|
n := len(s.parseState) - 1
|
||||||
|
s.parseState = s.parseState[0:n]
|
||||||
|
if n == 0 {
|
||||||
|
s.step = stateEndTop
|
||||||
|
s.endTop = true
|
||||||
|
} else {
|
||||||
|
s.step = stateEndValue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSpace(c byte) bool {
|
||||||
|
return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginValueOrEmpty is the state after reading `[`.
|
||||||
|
func stateBeginValueOrEmpty(s *scanner, c byte) int {
|
||||||
|
if isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == ']' {
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
return stateBeginValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginValue is the state at the beginning of the input.
|
||||||
|
func stateBeginValue(s *scanner, c byte) int {
|
||||||
|
if isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '{':
|
||||||
|
s.step = stateBeginStringOrEmpty
|
||||||
|
return s.pushParseState(c, parseObjectKey, scanBeginObject)
|
||||||
|
case '[':
|
||||||
|
s.step = stateBeginValueOrEmpty
|
||||||
|
return s.pushParseState(c, parseArrayValue, scanBeginArray)
|
||||||
|
case '"':
|
||||||
|
s.step = stateInString
|
||||||
|
return scanBeginLiteral
|
||||||
|
case '-':
|
||||||
|
s.step = stateNeg
|
||||||
|
return scanBeginLiteral
|
||||||
|
case '0': // beginning of 0.123
|
||||||
|
s.step = state0
|
||||||
|
return scanBeginLiteral
|
||||||
|
case 't': // beginning of true
|
||||||
|
s.step = stateT
|
||||||
|
return scanBeginLiteral
|
||||||
|
case 'f': // beginning of false
|
||||||
|
s.step = stateF
|
||||||
|
return scanBeginLiteral
|
||||||
|
case 'n': // beginning of null
|
||||||
|
s.step = stateN
|
||||||
|
return scanBeginLiteral
|
||||||
|
}
|
||||||
|
if '1' <= c && c <= '9' { // beginning of 1234.5
|
||||||
|
s.step = state1
|
||||||
|
return scanBeginLiteral
|
||||||
|
}
|
||||||
|
return s.error(c, "looking for beginning of value")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||||
|
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
||||||
|
if isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == '}' {
|
||||||
|
n := len(s.parseState)
|
||||||
|
s.parseState[n-1] = parseObjectValue
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
return stateBeginString(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateBeginString is the state after reading `{"key": value,`.
|
||||||
|
func stateBeginString(s *scanner, c byte) int {
|
||||||
|
if isSpace(c) {
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
if c == '"' {
|
||||||
|
s.step = stateInString
|
||||||
|
return scanBeginLiteral
|
||||||
|
}
|
||||||
|
return s.error(c, "looking for beginning of object key string")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateEndValue is the state after completing a value,
|
||||||
|
// such as after reading `{}` or `true` or `["x"`.
|
||||||
|
func stateEndValue(s *scanner, c byte) int {
|
||||||
|
n := len(s.parseState)
|
||||||
|
if n == 0 {
|
||||||
|
// Completed top-level before the current byte.
|
||||||
|
s.step = stateEndTop
|
||||||
|
s.endTop = true
|
||||||
|
return stateEndTop(s, c)
|
||||||
|
}
|
||||||
|
if isSpace(c) {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanSkipSpace
|
||||||
|
}
|
||||||
|
ps := s.parseState[n-1]
|
||||||
|
switch ps {
|
||||||
|
case parseObjectKey:
|
||||||
|
if c == ':' {
|
||||||
|
s.parseState[n-1] = parseObjectValue
|
||||||
|
s.step = stateBeginValue
|
||||||
|
return scanObjectKey
|
||||||
|
}
|
||||||
|
return s.error(c, "after object key")
|
||||||
|
case parseObjectValue:
|
||||||
|
if c == ',' {
|
||||||
|
s.parseState[n-1] = parseObjectKey
|
||||||
|
s.step = stateBeginString
|
||||||
|
return scanObjectValue
|
||||||
|
}
|
||||||
|
if c == '}' {
|
||||||
|
s.popParseState()
|
||||||
|
return scanEndObject
|
||||||
|
}
|
||||||
|
return s.error(c, "after object key:value pair")
|
||||||
|
case parseArrayValue:
|
||||||
|
if c == ',' {
|
||||||
|
s.step = stateBeginValue
|
||||||
|
return scanArrayValue
|
||||||
|
}
|
||||||
|
if c == ']' {
|
||||||
|
s.popParseState()
|
||||||
|
return scanEndArray
|
||||||
|
}
|
||||||
|
return s.error(c, "after array element")
|
||||||
|
}
|
||||||
|
return s.error(c, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateEndTop is the state after finishing the top-level value,
|
||||||
|
// such as after reading `{}` or `[1,2,3]`.
|
||||||
|
// Only space characters should be seen now.
|
||||||
|
func stateEndTop(s *scanner, c byte) int {
|
||||||
|
if !isSpace(c) {
|
||||||
|
// Complain about non-space byte on next call.
|
||||||
|
s.error(c, "after top-level value")
|
||||||
|
}
|
||||||
|
return scanEnd
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInString is the state after reading `"`.
|
||||||
|
func stateInString(s *scanner, c byte) int {
|
||||||
|
if c == '"' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == '\\' {
|
||||||
|
s.step = stateInStringEsc
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c < 0x20 {
|
||||||
|
return s.error(c, "in string literal")
|
||||||
|
}
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||||
|
func stateInStringEsc(s *scanner, c byte) int {
|
||||||
|
switch c {
|
||||||
|
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||||
|
s.step = stateInString
|
||||||
|
return scanContinue
|
||||||
|
case 'u':
|
||||||
|
s.step = stateInStringEscU
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in string escape code")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||||
|
func stateInStringEscU(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||||
|
func stateInStringEscU1(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU12
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||||
|
func stateInStringEscU12(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInStringEscU123
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||||
|
func stateInStringEscU123(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||||
|
s.step = stateInString
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
// numbers
|
||||||
|
return s.error(c, "in \\u hexadecimal character escape")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNeg is the state after reading `-` during a number.
|
||||||
|
func stateNeg(s *scanner, c byte) int {
|
||||||
|
if c == '0' {
|
||||||
|
s.step = state0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if '1' <= c && c <= '9' {
|
||||||
|
s.step = state1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// state1 is the state after reading a non-zero integer during a number,
|
||||||
|
// such as after reading `1` or `100` but not `0`.
|
||||||
|
func state1(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = state1
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return state0(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// state0 is the state after reading `0` during a number.
|
||||||
|
func state0(s *scanner, c byte) int {
|
||||||
|
if c == '.' {
|
||||||
|
s.step = stateDot
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == 'e' || c == 'E' {
|
||||||
|
s.step = stateE
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateDot is the state after reading the integer and decimal point in a number,
|
||||||
|
// such as after reading `1.`.
|
||||||
|
func stateDot(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = stateDot0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "after decimal point in numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||||
|
// digits of a number, such as after reading `3.14`.
|
||||||
|
func stateDot0(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
if c == 'e' || c == 'E' {
|
||||||
|
s.step = stateE
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateE is the state after reading the mantissa and e in a number,
|
||||||
|
// such as after reading `314e` or `0.314e`.
|
||||||
|
func stateE(s *scanner, c byte) int {
|
||||||
|
if c == '+' || c == '-' {
|
||||||
|
s.step = stateESign
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateESign(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||||
|
// such as after reading `314e-` or `0.314e+`.
|
||||||
|
func stateESign(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
s.step = stateE0
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in exponent of numeric literal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||||
|
// and at least one digit of the exponent in a number,
|
||||||
|
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||||
|
func stateE0(s *scanner, c byte) int {
|
||||||
|
if '0' <= c && c <= '9' {
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return stateEndValue(s, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateT is the state after reading `t`.
|
||||||
|
func stateT(s *scanner, c byte) int {
|
||||||
|
if c == 'r' {
|
||||||
|
s.step = stateTr
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'r')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateTr is the state after reading `tr`.
|
||||||
|
func stateTr(s *scanner, c byte) int {
|
||||||
|
if c == 'u' {
|
||||||
|
s.step = stateTru
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'u')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateTru is the state after reading `tru`.
|
||||||
|
func stateTru(s *scanner, c byte) int {
|
||||||
|
if c == 'e' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal true (expecting 'e')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateF is the state after reading `f`.
|
||||||
|
func stateF(s *scanner, c byte) int {
|
||||||
|
if c == 'a' {
|
||||||
|
s.step = stateFa
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'a')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFa is the state after reading `fa`.
|
||||||
|
func stateFa(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateFal
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFal is the state after reading `fal`.
|
||||||
|
func stateFal(s *scanner, c byte) int {
|
||||||
|
if c == 's' {
|
||||||
|
s.step = stateFals
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 's')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateFals is the state after reading `fals`.
|
||||||
|
func stateFals(s *scanner, c byte) int {
|
||||||
|
if c == 'e' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal false (expecting 'e')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateN is the state after reading `n`.
|
||||||
|
func stateN(s *scanner, c byte) int {
|
||||||
|
if c == 'u' {
|
||||||
|
s.step = stateNu
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'u')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNu is the state after reading `nu`.
|
||||||
|
func stateNu(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateNul
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateNul is the state after reading `nul`.
|
||||||
|
func stateNul(s *scanner, c byte) int {
|
||||||
|
if c == 'l' {
|
||||||
|
s.step = stateEndValue
|
||||||
|
return scanContinue
|
||||||
|
}
|
||||||
|
return s.error(c, "in literal null (expecting 'l')")
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateError is the state after reaching a syntax error,
|
||||||
|
// such as after reading `[1}` or `5.1.2`.
|
||||||
|
func stateError(s *scanner, c byte) int {
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// error records an error and switches to the error state.
|
||||||
|
func (s *scanner) error(c byte, context string) int {
|
||||||
|
s.step = stateError
|
||||||
|
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||||
|
return scanError
|
||||||
|
}
|
||||||
|
|
||||||
|
// quoteChar formats c as a quoted character literal.
|
||||||
|
func quoteChar(c byte) string {
|
||||||
|
// special cases - different from quoted strings
|
||||||
|
if c == '\'' {
|
||||||
|
return `'\''`
|
||||||
|
}
|
||||||
|
if c == '"' {
|
||||||
|
return `'"'`
|
||||||
|
}
|
||||||
|
|
||||||
|
// use quoted string with different quotation marks
|
||||||
|
s := strconv.Quote(string(c))
|
||||||
|
return "'" + s[1:len(s)-1] + "'"
|
||||||
|
}
|
513  common/contextjson/stream.go  Normal file
@@ -0,0 +1,513 @@
|
|||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Decoder reads and decodes JSON values from an input stream.
|
||||||
|
type Decoder struct {
|
||||||
|
r io.Reader
|
||||||
|
buf []byte
|
||||||
|
d decodeState
|
||||||
|
scanp int // start of unread data in buf
|
||||||
|
scanned int64 // amount of data already scanned
|
||||||
|
scan scanner
|
||||||
|
err error
|
||||||
|
|
||||||
|
tokenState int
|
||||||
|
tokenStack []int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a new decoder that reads from r.
|
||||||
|
//
|
||||||
|
// The decoder introduces its own buffering and may
|
||||||
|
// read data from r beyond the JSON values requested.
|
||||||
|
func NewDecoder(r io.Reader) *Decoder {
|
||||||
|
return &Decoder{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
||||||
|
// Number instead of as a float64.
|
||||||
|
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
|
||||||
|
|
||||||
|
// DisallowUnknownFields causes the Decoder to return an error when the destination
|
||||||
|
// is a struct and the input contains object keys which do not match any
|
||||||
|
// non-ignored, exported fields in the destination.
|
||||||
|
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
|
||||||
|
|
||||||
|
// Decode reads the next JSON-encoded value from its
|
||||||
|
// input and stores it in the value pointed to by v.
|
||||||
|
//
|
||||||
|
// See the documentation for Unmarshal for details about
|
||||||
|
// the conversion of JSON into a Go value.
|
||||||
|
func (dec *Decoder) Decode(v any) error {
|
||||||
|
if dec.err != nil {
|
||||||
|
return dec.err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := dec.tokenPrepareForDecode(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dec.tokenValueAllowed() {
|
||||||
|
return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read whole value into buffer.
|
||||||
|
n, err := dec.readValue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
|
||||||
|
dec.scanp += n
|
||||||
|
|
||||||
|
// Don't save err from unmarshal into dec.err:
|
||||||
|
// the connection is still usable since we read a complete JSON
|
||||||
|
// object from it before the error happened.
|
||||||
|
err = dec.d.unmarshal(v)
|
||||||
|
|
||||||
|
// fixup token streaming state
|
||||||
|
dec.tokenValueEnd()
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Buffered returns a reader of the data remaining in the Decoder's
|
||||||
|
// buffer. The reader is valid until the next call to Decode.
|
||||||
|
func (dec *Decoder) Buffered() io.Reader {
|
||||||
|
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// readValue reads a JSON value into dec.buf.
|
||||||
|
// It returns the length of the encoding.
|
||||||
|
func (dec *Decoder) readValue() (int, error) {
|
||||||
|
dec.scan.reset()
|
||||||
|
|
||||||
|
scanp := dec.scanp
|
||||||
|
var err error
|
||||||
|
Input:
|
||||||
|
// help the compiler see that scanp is never negative, so it can remove
|
||||||
|
// some bounds checks below.
|
||||||
|
for scanp >= 0 {
|
||||||
|
|
||||||
|
// Look in the buffer for a new value.
|
||||||
|
for ; scanp < len(dec.buf); scanp++ {
|
||||||
|
c := dec.buf[scanp]
|
||||||
|
dec.scan.bytes++
|
||||||
|
switch dec.scan.step(&dec.scan, c) {
|
||||||
|
case scanEnd:
|
||||||
|
// scanEnd is delayed one byte so we decrement
|
||||||
|
// the scanner bytes count by 1 to ensure that
|
||||||
|
// this value is correct in the next call of Decode.
|
||||||
|
dec.scan.bytes--
|
||||||
|
break Input
|
||||||
|
case scanEndObject, scanEndArray:
|
||||||
|
// scanEnd is delayed one byte.
|
||||||
|
// We might block trying to get that byte from src,
|
||||||
|
// so instead invent a space byte.
|
||||||
|
if stateEndValue(&dec.scan, ' ') == scanEnd {
|
||||||
|
scanp++
|
||||||
|
break Input
|
||||||
|
}
|
||||||
|
case scanError:
|
||||||
|
dec.err = dec.scan.err
|
||||||
|
return 0, dec.scan.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Did the last read have an error?
|
||||||
|
// Delayed until now to allow buffer scan.
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||||
|
break Input
|
||||||
|
}
|
||||||
|
if nonSpace(dec.buf) {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dec.err = err
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
n := scanp - dec.scanp
|
||||||
|
err = dec.refill()
|
||||||
|
scanp = dec.scanp + n
|
||||||
|
}
|
||||||
|
return scanp - dec.scanp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) refill() error {
|
||||||
|
// Make room to read more into the buffer.
|
||||||
|
// First slide down data already consumed.
|
||||||
|
if dec.scanp > 0 {
|
||||||
|
dec.scanned += int64(dec.scanp)
|
||||||
|
n := copy(dec.buf, dec.buf[dec.scanp:])
|
||||||
|
dec.buf = dec.buf[:n]
|
||||||
|
dec.scanp = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Grow buffer if not large enough.
|
||||||
|
const minRead = 512
|
||||||
|
if cap(dec.buf)-len(dec.buf) < minRead {
|
||||||
|
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
||||||
|
copy(newBuf, dec.buf)
|
||||||
|
dec.buf = newBuf
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read. Delay error for next iteration (after scan).
|
||||||
|
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||||
|
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func nonSpace(b []byte) bool {
|
||||||
|
for _, c := range b {
|
||||||
|
if !isSpace(c) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// An Encoder writes JSON values to an output stream.
|
||||||
|
type Encoder struct {
|
||||||
|
w io.Writer
|
||||||
|
err error
|
||||||
|
escapeHTML bool
|
||||||
|
|
||||||
|
indentBuf []byte
|
||||||
|
indentPrefix string
|
||||||
|
indentValue string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns a new encoder that writes to w.
|
||||||
|
func NewEncoder(w io.Writer) *Encoder {
|
||||||
|
return &Encoder{w: w, escapeHTML: true}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode writes the JSON encoding of v to the stream,
|
||||||
|
// followed by a newline character.
|
||||||
|
//
|
||||||
|
// See the documentation for Marshal for details about the
|
||||||
|
// conversion of Go values to JSON.
|
||||||
|
func (enc *Encoder) Encode(v any) error {
|
||||||
|
if enc.err != nil {
|
||||||
|
return enc.err
|
||||||
|
}
|
||||||
|
|
||||||
|
e := newEncodeState()
|
||||||
|
defer encodeStatePool.Put(e)
|
||||||
|
|
||||||
|
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Terminate each value with a newline.
|
||||||
|
// This makes the output look a little nicer
|
||||||
|
// when debugging, and some kind of space
|
||||||
|
// is required if the encoded value was a number,
|
||||||
|
// so that the reader knows there aren't more
|
||||||
|
// digits coming.
|
||||||
|
e.WriteByte('\n')
|
||||||
|
|
||||||
|
b := e.Bytes()
|
||||||
|
if enc.indentPrefix != "" || enc.indentValue != "" {
|
||||||
|
enc.indentBuf, err = appendIndent(enc.indentBuf[:0], b, enc.indentPrefix, enc.indentValue)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b = enc.indentBuf
|
||||||
|
}
|
||||||
|
if _, err = enc.w.Write(b); err != nil {
|
||||||
|
enc.err = err
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIndent instructs the encoder to format each subsequent encoded
|
||||||
|
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
|
||||||
|
// Calling SetIndent("", "") disables indentation.
|
||||||
|
func (enc *Encoder) SetIndent(prefix, indent string) {
|
||||||
|
enc.indentPrefix = prefix
|
||||||
|
enc.indentValue = indent
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEscapeHTML specifies whether problematic HTML characters
|
||||||
|
// should be escaped inside JSON quoted strings.
|
||||||
|
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
|
||||||
|
// to avoid certain safety problems that can arise when embedding JSON in HTML.
|
||||||
|
//
|
||||||
|
// In non-HTML settings where the escaping interferes with the readability
|
||||||
|
// of the output, SetEscapeHTML(false) disables this behavior.
|
||||||
|
func (enc *Encoder) SetEscapeHTML(on bool) {
|
||||||
|
enc.escapeHTML = on
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawMessage is a raw encoded JSON value.
|
||||||
|
// It implements Marshaler and Unmarshaler and can
|
||||||
|
// be used to delay JSON decoding or precompute a JSON encoding.
|
||||||
|
type RawMessage []byte
|
||||||
|
|
||||||
|
// MarshalJSON returns m as the JSON encoding of m.
|
||||||
|
func (m RawMessage) MarshalJSON() ([]byte, error) {
|
||||||
|
if m == nil {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON sets *m to a copy of data.
|
||||||
|
func (m *RawMessage) UnmarshalJSON(data []byte) error {
|
||||||
|
if m == nil {
|
||||||
|
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
|
||||||
|
}
|
||||||
|
*m = append((*m)[0:0], data...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ Marshaler = (*RawMessage)(nil)
|
||||||
|
_ Unmarshaler = (*RawMessage)(nil)
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Token holds a value of one of these types:
|
||||||
|
//
|
||||||
|
// Delim, for the four JSON delimiters [ ] { }
|
||||||
|
// bool, for JSON booleans
|
||||||
|
// float64, for JSON numbers
|
||||||
|
// Number, for JSON numbers
|
||||||
|
// string, for JSON string literals
|
||||||
|
// nil, for JSON null
|
||||||
|
type Token any
|
||||||
|
|
||||||
|
const (
|
||||||
|
tokenTopValue = iota
|
||||||
|
tokenArrayStart
|
||||||
|
tokenArrayValue
|
||||||
|
tokenArrayComma
|
||||||
|
tokenObjectStart
|
||||||
|
tokenObjectKey
|
||||||
|
tokenObjectColon
|
||||||
|
tokenObjectValue
|
||||||
|
tokenObjectComma
|
||||||
|
)
|
||||||
|
|
||||||
|
// advance tokenstate from a separator state to a value state
|
||||||
|
func (dec *Decoder) tokenPrepareForDecode() error {
|
||||||
|
// Note: Not calling peek before switch, to avoid
|
||||||
|
// putting peek into the standard Decode path.
|
||||||
|
// peek is only called when using the Token API.
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenArrayComma:
|
||||||
|
c, err := dec.peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if c != ',' {
|
||||||
|
return &SyntaxError{"expected comma after array element", dec.InputOffset()}
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = tokenArrayValue
|
||||||
|
case tokenObjectColon:
|
||||||
|
c, err := dec.peek()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if c != ':' {
|
||||||
|
return &SyntaxError{"expected colon after object key", dec.InputOffset()}
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = tokenObjectValue
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) tokenValueAllowed() bool {
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) tokenValueEnd() {
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenArrayStart, tokenArrayValue:
|
||||||
|
dec.tokenState = tokenArrayComma
|
||||||
|
case tokenObjectValue:
|
||||||
|
dec.tokenState = tokenObjectComma
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
|
||||||
|
type Delim rune
|
||||||
|
|
||||||
|
func (d Delim) String() string {
|
||||||
|
return string(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Token returns the next JSON token in the input stream.
|
||||||
|
// At the end of the input stream, Token returns nil, io.EOF.
|
||||||
|
//
|
||||||
|
// Token guarantees that the delimiters [ ] { } it returns are
|
||||||
|
// properly nested and matched: if Token encounters an unexpected
|
||||||
|
// delimiter in the input, it will return an error.
|
||||||
|
//
|
||||||
|
// The input stream consists of basic JSON values—bool, string,
|
||||||
|
// number, and null—along with delimiters [ ] { } of type Delim
|
||||||
|
// to mark the start and end of arrays and objects.
|
||||||
|
// Commas and colons are elided.
|
||||||
|
func (dec *Decoder) Token() (Token, error) {
|
||||||
|
for {
|
||||||
|
c, err := dec.peek()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch c {
|
||||||
|
case '[':
|
||||||
|
if !dec.tokenValueAllowed() {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||||
|
dec.tokenState = tokenArrayStart
|
||||||
|
return Delim('['), nil
|
||||||
|
|
||||||
|
case ']':
|
||||||
|
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||||
|
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||||
|
dec.tokenValueEnd()
|
||||||
|
return Delim(']'), nil
|
||||||
|
|
||||||
|
case '{':
|
||||||
|
if !dec.tokenValueAllowed() {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||||
|
dec.tokenState = tokenObjectStart
|
||||||
|
return Delim('{'), nil
|
||||||
|
|
||||||
|
case '}':
|
||||||
|
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||||
|
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||||
|
dec.tokenValueEnd()
|
||||||
|
return Delim('}'), nil
|
||||||
|
|
||||||
|
case ':':
|
||||||
|
if dec.tokenState != tokenObjectColon {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = tokenObjectValue
|
||||||
|
continue
|
||||||
|
|
||||||
|
case ',':
|
||||||
|
if dec.tokenState == tokenArrayComma {
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = tokenArrayValue
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if dec.tokenState == tokenObjectComma {
|
||||||
|
dec.scanp++
|
||||||
|
dec.tokenState = tokenObjectKey
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return dec.tokenError(c)
|
||||||
|
|
||||||
|
case '"':
|
||||||
|
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
|
||||||
|
var x string
|
||||||
|
old := dec.tokenState
|
||||||
|
dec.tokenState = tokenTopValue
|
||||||
|
err := dec.Decode(&x)
|
||||||
|
dec.tokenState = old
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dec.tokenState = tokenObjectColon
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
default:
|
||||||
|
if !dec.tokenValueAllowed() {
|
||||||
|
return dec.tokenError(c)
|
||||||
|
}
|
||||||
|
var x any
|
||||||
|
if err := dec.Decode(&x); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||||
|
var context string
|
||||||
|
switch dec.tokenState {
|
||||||
|
case tokenTopValue:
|
||||||
|
context = " looking for beginning of value"
|
||||||
|
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||||
|
context = " looking for beginning of value"
|
||||||
|
case tokenArrayComma:
|
||||||
|
context = " after array element"
|
||||||
|
case tokenObjectKey:
|
||||||
|
context = " looking for beginning of object key string"
|
||||||
|
case tokenObjectColon:
|
||||||
|
context = " after object key"
|
||||||
|
case tokenObjectComma:
|
||||||
|
context = " after object key:value pair"
|
||||||
|
}
|
||||||
|
return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// More reports whether there is another element in the
|
||||||
|
// current array or object being parsed.
|
||||||
|
func (dec *Decoder) More() bool {
|
||||||
|
c, err := dec.peek()
|
||||||
|
return err == nil && c != ']' && c != '}'
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dec *Decoder) peek() (byte, error) {
|
||||||
|
var err error
|
||||||
|
for {
|
||||||
|
for i := dec.scanp; i < len(dec.buf); i++ {
|
||||||
|
c := dec.buf[i]
|
||||||
|
if isSpace(c) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dec.scanp = i
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
// buffer has been scanned, now report any error
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = dec.refill()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InputOffset returns the input stream byte offset of the current decoder position.
|
||||||
|
// The offset gives the location of the end of the most recently returned token
|
||||||
|
// and the beginning of the next token.
|
||||||
|
func (dec *Decoder) InputOffset() int64 {
|
||||||
|
return dec.scanned + int64(dec.scanp)
|
||||||
|
}
|
218  common/contextjson/tables.go  Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package json
|
||||||
|
|
||||||
|
import "unicode/utf8"
|
||||||
|
|
||||||
|
// safeSet holds the value true if the ASCII character with the given array
|
||||||
|
// position can be represented inside a JSON string without any further
|
||||||
|
// escaping.
|
||||||
|
//
|
||||||
|
// All values are true except for the ASCII control characters (0-31), the
|
||||||
|
// double quote ("), and the backslash character ("\").
|
||||||
|
var safeSet = [utf8.RuneSelf]bool{
|
||||||
|
' ': true,
|
||||||
|
'!': true,
|
||||||
|
'"': false,
|
||||||
|
'#': true,
|
||||||
|
'$': true,
|
||||||
|
'%': true,
|
||||||
|
'&': true,
|
||||||
|
'\'': true,
|
||||||
|
'(': true,
|
||||||
|
')': true,
|
||||||
|
'*': true,
|
||||||
|
'+': true,
|
||||||
|
',': true,
|
||||||
|
'-': true,
|
||||||
|
'.': true,
|
||||||
|
'/': true,
|
||||||
|
'0': true,
|
||||||
|
'1': true,
|
||||||
|
'2': true,
|
||||||
|
'3': true,
|
||||||
|
'4': true,
|
||||||
|
'5': true,
|
||||||
|
'6': true,
|
||||||
|
'7': true,
|
||||||
|
'8': true,
|
||||||
|
'9': true,
|
||||||
|
':': true,
|
||||||
|
';': true,
|
||||||
|
'<': true,
|
||||||
|
'=': true,
|
||||||
|
'>': true,
|
||||||
|
'?': true,
|
||||||
|
'@': true,
|
||||||
|
'A': true,
|
||||||
|
'B': true,
|
||||||
|
'C': true,
|
||||||
|
'D': true,
|
||||||
|
'E': true,
|
||||||
|
'F': true,
|
||||||
|
'G': true,
|
||||||
|
'H': true,
|
||||||
|
'I': true,
|
||||||
|
'J': true,
|
||||||
|
'K': true,
|
||||||
|
'L': true,
|
||||||
|
'M': true,
|
||||||
|
'N': true,
|
||||||
|
'O': true,
|
||||||
|
'P': true,
|
||||||
|
'Q': true,
|
||||||
|
'R': true,
|
||||||
|
'S': true,
|
||||||
|
'T': true,
|
||||||
|
'U': true,
|
||||||
|
'V': true,
|
||||||
|
'W': true,
|
||||||
|
'X': true,
|
||||||
|
'Y': true,
|
||||||
|
'Z': true,
|
||||||
|
'[': true,
|
||||||
|
'\\': false,
|
||||||
|
']': true,
|
||||||
|
'^': true,
|
||||||
|
'_': true,
|
||||||
|
'`': true,
|
||||||
|
'a': true,
|
||||||
|
'b': true,
|
||||||
|
'c': true,
|
||||||
|
'd': true,
|
||||||
|
'e': true,
|
||||||
|
'f': true,
|
||||||
|
'g': true,
|
||||||
|
'h': true,
|
||||||
|
'i': true,
|
||||||
|
'j': true,
|
||||||
|
'k': true,
|
||||||
|
'l': true,
|
||||||
|
'm': true,
|
||||||
|
'n': true,
|
||||||
|
'o': true,
|
||||||
|
'p': true,
|
||||||
|
'q': true,
|
||||||
|
'r': true,
|
||||||
|
's': true,
|
||||||
|
't': true,
|
||||||
|
'u': true,
|
||||||
|
'v': true,
|
||||||
|
'w': true,
|
||||||
|
'x': true,
|
||||||
|
'y': true,
|
||||||
|
'z': true,
|
||||||
|
'{': true,
|
||||||
|
'|': true,
|
||||||
|
'}': true,
|
||||||
|
'~': true,
|
||||||
|
'\u007f': true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// htmlSafeSet holds the value true if the ASCII character with the given
|
||||||
|
// array position can be safely represented inside a JSON string, embedded
|
||||||
|
// inside of HTML <script> tags, without any additional escaping.
|
||||||
|
//
|
||||||
|
// All values are true except for the ASCII control characters (0-31), the
|
||||||
|
// double quote ("), the backslash character ("\"), HTML opening and closing
|
||||||
|
// tags ("<" and ">"), and the ampersand ("&").
|
||||||
|
var htmlSafeSet = [utf8.RuneSelf]bool{
|
||||||
|
' ': true,
|
||||||
|
'!': true,
|
||||||
|
'"': false,
|
||||||
|
'#': true,
|
||||||
|
'$': true,
|
||||||
|
'%': true,
|
||||||
|
'&': false,
|
||||||
|
'\'': true,
|
||||||
|
'(': true,
|
||||||
|
')': true,
|
||||||
|
'*': true,
|
||||||
|
'+': true,
|
||||||
|
',': true,
|
||||||
|
'-': true,
|
||||||
|
'.': true,
|
||||||
|
'/': true,
|
||||||
|
'0': true,
|
||||||
|
'1': true,
|
||||||
|
'2': true,
|
||||||
|
'3': true,
|
||||||
|
'4': true,
|
||||||
|
'5': true,
|
||||||
|
'6': true,
|
||||||
|
'7': true,
|
||||||
|
'8': true,
|
||||||
|
'9': true,
|
||||||
|
':': true,
|
||||||
|
';': true,
|
||||||
|
'<': false,
|
||||||
|
'=': true,
|
||||||
|
'>': false,
|
||||||
|
'?': true,
|
||||||
|
'@': true,
|
||||||
|
'A': true,
|
||||||
|
'B': true,
|
||||||
|
'C': true,
|
||||||
|
'D': true,
|
||||||
|
'E': true,
|
||||||
|
'F': true,
|
||||||
|
'G': true,
|
||||||
|
'H': true,
|
||||||
|
'I': true,
|
||||||
|
'J': true,
|
||||||
|
'K': true,
|
||||||
|
'L': true,
|
||||||
|
'M': true,
|
||||||
|
'N': true,
|
||||||
|
'O': true,
|
||||||
|
'P': true,
|
||||||
|
'Q': true,
|
||||||
|
'R': true,
|
||||||
|
'S': true,
|
||||||
|
'T': true,
|
||||||
|
'U': true,
|
||||||
|
'V': true,
|
||||||
|
'W': true,
|
||||||
|
'X': true,
|
||||||
|
'Y': true,
|
||||||
|
'Z': true,
|
||||||
|
'[': true,
|
||||||
|
'\\': false,
|
||||||
|
']': true,
|
||||||
|
'^': true,
|
||||||
|
'_': true,
|
||||||
|
'`': true,
|
||||||
|
'a': true,
|
||||||
|
'b': true,
|
||||||
|
'c': true,
|
||||||
|
'd': true,
|
||||||
|
'e': true,
|
||||||
|
'f': true,
|
||||||
|
'g': true,
|
||||||
|
'h': true,
|
||||||
|
'i': true,
|
||||||
|
'j': true,
|
||||||
|
'k': true,
|
||||||
|
'l': true,
|
||||||
|
'm': true,
|
||||||
|
'n': true,
|
||||||
|
'o': true,
|
||||||
|
'p': true,
|
||||||
|
'q': true,
|
||||||
|
'r': true,
|
||||||
|
's': true,
|
||||||
|
't': true,
|
||||||
|
'u': true,
|
||||||
|
'v': true,
|
||||||
|
'w': true,
|
||||||
|
'x': true,
|
||||||
|
'y': true,
|
||||||
|
'z': true,
|
||||||
|
'{': true,
|
||||||
|
'|': true,
|
||||||
|
'}': true,
|
||||||
|
'~': true,
|
||||||
|
'\u007f': true,
|
||||||
|
}
|
38 common/contextjson/tags.go Normal file
@ -0,0 +1,38 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
    "strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
    tag, opt, _ := strings.Cut(tag, ",")
    return tag, tagOptions(opt)
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
    if len(o) == 0 {
        return false
    }
    s := string(o)
    for s != "" {
        var name string
        name, s, _ = strings.Cut(s, ",")
        if name == optionName {
            return true
        }
    }
    return false
}
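The two helpers above mirror the standard library's struct-tag parsing. For orientation, a minimal in-package test sketch of how they behave; the file name `tags_test.go` and the expectations are illustrative and not part of this change:

```go
// common/contextjson/tags_test.go (hypothetical)
package json

import "testing"

func TestParseTag(t *testing.T) {
    // The part before the first comma is the field name; the rest is options.
    name, opts := parseTag("name,omitempty,string")
    if name != "name" || !opts.Contains("omitempty") || !opts.Contains("string") {
        t.Fatal("unexpected parse result")
    }
    if opts.Contains("inline") {
        t.Fatal("Contains matched an option that is not present")
    }
}
```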
@ -18,11 +18,19 @@ func NewRouter(router adapter.Router) N.Dialer {
 }

 func (d *RouterDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
-    return d.router.DefaultOutbound(network).DialContext(ctx, network, destination)
+    dialer, err := d.router.DefaultOutbound(network)
+    if err != nil {
+        return nil, err
+    }
+    return dialer.DialContext(ctx, network, destination)
 }

 func (d *RouterDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
-    return d.router.DefaultOutbound(N.NetworkUDP).ListenPacket(ctx, destination)
+    dialer, err := d.router.DefaultOutbound(N.NetworkUDP)
+    if err != nil {
+        return nil, err
+    }
+    return dialer.ListenPacket(ctx, destination)
 }

 func (d *RouterDialer) Upstream() any {
21 common/json/context.go Normal file
@ -0,0 +1,21 @@
//go:build go1.21 && !without_contextjson

package json

import "github.com/sagernet/sing-box/common/contextjson"

var (
    Marshal    = json.Marshal
    Unmarshal  = json.Unmarshal
    NewEncoder = json.NewEncoder
    NewDecoder = json.NewDecoder
)

type (
    Encoder     = json.Encoder
    Decoder     = json.Decoder
    Token       = json.Token
    Delim       = json.Delim
    SyntaxError = json.SyntaxError
    RawMessage  = json.RawMessage
)
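With Go 1.21+ and the default build tags, the aliases above route callers through the contextjson fork instead of `encoding/json`. A sketch of the caller-side effect; the exact error wording depends on the fork and is not asserted here:

```go
package main

import (
    "fmt"

    "github.com/sagernet/sing-box/common/json"
)

type listenOptions struct {
    Port int `json:"port"`
}

type config struct {
    Inbound listenOptions `json:"inbound"`
}

func main() {
    var value config
    // When built with Go 1.21+ (and without the without_contextjson tag),
    // this decode error is produced by the fork, which can include the key
    // path leading to the offending "port" value.
    err := json.Unmarshal([]byte(`{"inbound": {"port": "not-a-number"}}`), &value)
    fmt.Println(err)
}
```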
@ -1,3 +1,5 @@
+//go:build !go1.21 || without_contextjson
+
 package json

 import "encoding/json"
@ -15,4 +17,5 @@ type (
     Token       = json.Token
     Delim       = json.Delim
     SyntaxError = json.SyntaxError
+    RawMessage  = json.RawMessage
 )
485 common/srs/binary.go Normal file
@ -0,0 +1,485 @@
package srs

import (
    "compress/zlib"
    "encoding/binary"
    "io"
    "net/netip"

    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/option"
    "github.com/sagernet/sing/common"
    "github.com/sagernet/sing/common/domain"
    E "github.com/sagernet/sing/common/exceptions"
    "github.com/sagernet/sing/common/rw"

    "go4.org/netipx"
)

var MagicBytes = [3]byte{0x53, 0x52, 0x53} // SRS

const (
    ruleItemQueryType uint8 = iota
    ruleItemNetwork
    ruleItemDomain
    ruleItemDomainKeyword
    ruleItemDomainRegex
    ruleItemSourceIPCIDR
    ruleItemIPCIDR
    ruleItemSourcePort
    ruleItemSourcePortRange
    ruleItemPort
    ruleItemPortRange
    ruleItemProcessName
    ruleItemProcessPath
    ruleItemPackageName
    ruleItemWIFISSID
    ruleItemWIFIBSSID
    ruleItemFinal uint8 = 0xFF
)

func Read(reader io.Reader, recovery bool) (ruleSet option.PlainRuleSet, err error) {
    var magicBytes [3]byte
    _, err = io.ReadFull(reader, magicBytes[:])
    if err != nil {
        return
    }
    if magicBytes != MagicBytes {
        err = E.New("invalid sing-box rule set file")
        return
    }
    var version uint8
    err = binary.Read(reader, binary.BigEndian, &version)
    if err != nil {
        return ruleSet, err
    }
    if version != 1 {
        return ruleSet, E.New("unsupported version: ", version)
    }
    zReader, err := zlib.NewReader(reader)
    if err != nil {
        return
    }
    length, err := rw.ReadUVariant(zReader)
    if err != nil {
        return
    }
    ruleSet.Rules = make([]option.HeadlessRule, length)
    for i := uint64(0); i < length; i++ {
        ruleSet.Rules[i], err = readRule(zReader, recovery)
        if err != nil {
            err = E.Cause(err, "read rule[", i, "]")
            return
        }
    }
    return
}

func Write(writer io.Writer, ruleSet option.PlainRuleSet) error {
    _, err := writer.Write(MagicBytes[:])
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, uint8(1))
    if err != nil {
        return err
    }
    zWriter, err := zlib.NewWriterLevel(writer, zlib.BestCompression)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(zWriter, uint64(len(ruleSet.Rules)))
    if err != nil {
        return err
    }
    for _, rule := range ruleSet.Rules {
        err = writeRule(zWriter, rule)
        if err != nil {
            return err
        }
    }
    return zWriter.Close()
}

func readRule(reader io.Reader, recovery bool) (rule option.HeadlessRule, err error) {
    var ruleType uint8
    err = binary.Read(reader, binary.BigEndian, &ruleType)
    if err != nil {
        return
    }
    switch ruleType {
    case 0:
        rule.DefaultOptions, err = readDefaultRule(reader, recovery)
    case 1:
        rule.LogicalOptions, err = readLogicalRule(reader, recovery)
    default:
        err = E.New("unknown rule type: ", ruleType)
    }
    return
}

func writeRule(writer io.Writer, rule option.HeadlessRule) error {
    switch rule.Type {
    case C.RuleTypeDefault:
        return writeDefaultRule(writer, rule.DefaultOptions)
    case C.RuleTypeLogical:
        return writeLogicalRule(writer, rule.LogicalOptions)
    default:
        panic("unknown rule type: " + rule.Type)
    }
}

func readDefaultRule(reader io.Reader, recovery bool) (rule option.DefaultHeadlessRule, err error) {
    var lastItemType uint8
    for {
        var itemType uint8
        err = binary.Read(reader, binary.BigEndian, &itemType)
        if err != nil {
            return
        }
        switch itemType {
        case ruleItemQueryType:
            var rawQueryType []uint16
            rawQueryType, err = readRuleItemUint16(reader)
            if err != nil {
                return
            }
            rule.QueryType = common.Map(rawQueryType, func(it uint16) option.DNSQueryType {
                return option.DNSQueryType(it)
            })
        case ruleItemNetwork:
            rule.Network, err = readRuleItemString(reader)
        case ruleItemDomain:
            var matcher *domain.Matcher
            matcher, err = domain.ReadMatcher(reader)
            if err != nil {
                return
            }
            rule.DomainMatcher = matcher
        case ruleItemDomainKeyword:
            rule.DomainKeyword, err = readRuleItemString(reader)
        case ruleItemDomainRegex:
            rule.DomainRegex, err = readRuleItemString(reader)
        case ruleItemSourceIPCIDR:
            rule.SourceIPSet, err = readIPSet(reader)
            if err != nil {
                return
            }
            if recovery {
                rule.SourceIPCIDR = common.Map(rule.SourceIPSet.Prefixes(), netip.Prefix.String)
            }
        case ruleItemIPCIDR:
            rule.IPSet, err = readIPSet(reader)
            if err != nil {
                return
            }
            if recovery {
                rule.IPCIDR = common.Map(rule.IPSet.Prefixes(), netip.Prefix.String)
            }
        case ruleItemSourcePort:
            rule.SourcePort, err = readRuleItemUint16(reader)
        case ruleItemSourcePortRange:
            rule.SourcePortRange, err = readRuleItemString(reader)
        case ruleItemPort:
            rule.Port, err = readRuleItemUint16(reader)
        case ruleItemPortRange:
            rule.PortRange, err = readRuleItemString(reader)
        case ruleItemProcessName:
            rule.ProcessName, err = readRuleItemString(reader)
        case ruleItemProcessPath:
            rule.ProcessPath, err = readRuleItemString(reader)
        case ruleItemPackageName:
            rule.PackageName, err = readRuleItemString(reader)
        case ruleItemWIFISSID:
            rule.WIFISSID, err = readRuleItemString(reader)
        case ruleItemWIFIBSSID:
            rule.WIFIBSSID, err = readRuleItemString(reader)
        case ruleItemFinal:
            err = binary.Read(reader, binary.BigEndian, &rule.Invert)
            return
        default:
            err = E.New("unknown rule item type: ", itemType, ", last type: ", lastItemType)
        }
        if err != nil {
            return
        }
        lastItemType = itemType
    }
}

func writeDefaultRule(writer io.Writer, rule option.DefaultHeadlessRule) error {
    err := binary.Write(writer, binary.BigEndian, uint8(0))
    if err != nil {
        return err
    }
    if len(rule.QueryType) > 0 {
        err = writeRuleItemUint16(writer, ruleItemQueryType, common.Map(rule.QueryType, func(it option.DNSQueryType) uint16 {
            return uint16(it)
        }))
        if err != nil {
            return err
        }
    }
    if len(rule.Network) > 0 {
        err = writeRuleItemString(writer, ruleItemNetwork, rule.Network)
        if err != nil {
            return err
        }
    }
    if len(rule.Domain) > 0 || len(rule.DomainSuffix) > 0 {
        err = binary.Write(writer, binary.BigEndian, ruleItemDomain)
        if err != nil {
            return err
        }
        err = domain.NewMatcher(rule.Domain, rule.DomainSuffix).Write(writer)
        if err != nil {
            return err
        }
    }
    if len(rule.DomainKeyword) > 0 {
        err = writeRuleItemString(writer, ruleItemDomainKeyword, rule.DomainKeyword)
        if err != nil {
            return err
        }
    }
    if len(rule.DomainRegex) > 0 {
        err = writeRuleItemString(writer, ruleItemDomainRegex, rule.DomainRegex)
        if err != nil {
            return err
        }
    }
    if len(rule.SourceIPCIDR) > 0 {
        err = writeRuleItemCIDR(writer, ruleItemSourceIPCIDR, rule.SourceIPCIDR)
        if err != nil {
            return E.Cause(err, "source_ipcidr")
        }
    }
    if len(rule.IPCIDR) > 0 {
        err = writeRuleItemCIDR(writer, ruleItemIPCIDR, rule.IPCIDR)
        if err != nil {
            return E.Cause(err, "ipcidr")
        }
    }
    if len(rule.SourcePort) > 0 {
        err = writeRuleItemUint16(writer, ruleItemSourcePort, rule.SourcePort)
        if err != nil {
            return err
        }
    }
    if len(rule.SourcePortRange) > 0 {
        err = writeRuleItemString(writer, ruleItemSourcePortRange, rule.SourcePortRange)
        if err != nil {
            return err
        }
    }
    if len(rule.Port) > 0 {
        err = writeRuleItemUint16(writer, ruleItemPort, rule.Port)
        if err != nil {
            return err
        }
    }
    if len(rule.PortRange) > 0 {
        err = writeRuleItemString(writer, ruleItemPortRange, rule.PortRange)
        if err != nil {
            return err
        }
    }
    if len(rule.ProcessName) > 0 {
        err = writeRuleItemString(writer, ruleItemProcessName, rule.ProcessName)
        if err != nil {
            return err
        }
    }
    if len(rule.ProcessPath) > 0 {
        err = writeRuleItemString(writer, ruleItemProcessPath, rule.ProcessPath)
        if err != nil {
            return err
        }
    }
    if len(rule.PackageName) > 0 {
        err = writeRuleItemString(writer, ruleItemPackageName, rule.PackageName)
        if err != nil {
            return err
        }
    }
    if len(rule.WIFISSID) > 0 {
        err = writeRuleItemString(writer, ruleItemWIFISSID, rule.WIFISSID)
        if err != nil {
            return err
        }
    }
    if len(rule.WIFIBSSID) > 0 {
        err = writeRuleItemString(writer, ruleItemWIFIBSSID, rule.WIFIBSSID)
        if err != nil {
            return err
        }
    }
    err = binary.Write(writer, binary.BigEndian, ruleItemFinal)
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, rule.Invert)
    if err != nil {
        return err
    }
    return nil
}

func readRuleItemString(reader io.Reader) ([]string, error) {
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return nil, err
    }
    value := make([]string, length)
    for i := uint64(0); i < length; i++ {
        value[i], err = rw.ReadVString(reader)
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemString(writer io.Writer, itemType uint8, value []string) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    if err != nil {
        return err
    }
    for _, item := range value {
        err = rw.WriteVString(writer, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func readRuleItemUint16(reader io.Reader) ([]uint16, error) {
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return nil, err
    }
    value := make([]uint16, length)
    for i := uint64(0); i < length; i++ {
        err = binary.Read(reader, binary.BigEndian, &value[i])
        if err != nil {
            return nil, err
        }
    }
    return value, nil
}

func writeRuleItemUint16(writer io.Writer, itemType uint8, value []uint16) error {
    err := binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(value)))
    if err != nil {
        return err
    }
    for _, item := range value {
        err = binary.Write(writer, binary.BigEndian, item)
        if err != nil {
            return err
        }
    }
    return nil
}

func writeRuleItemCIDR(writer io.Writer, itemType uint8, value []string) error {
    var builder netipx.IPSetBuilder
    for i, prefixString := range value {
        prefix, err := netip.ParsePrefix(prefixString)
        if err == nil {
            builder.AddPrefix(prefix)
            continue
        }
        addr, addrErr := netip.ParseAddr(prefixString)
        if addrErr == nil {
            builder.Add(addr)
            continue
        }
        return E.Cause(err, "parse [", i, "]")
    }
    ipSet, err := builder.IPSet()
    if err != nil {
        return err
    }
    err = binary.Write(writer, binary.BigEndian, itemType)
    if err != nil {
        return err
    }
    return writeIPSet(writer, ipSet)
}

func readLogicalRule(reader io.Reader, recovery bool) (logicalRule option.LogicalHeadlessRule, err error) {
    var mode uint8
    err = binary.Read(reader, binary.BigEndian, &mode)
    if err != nil {
        return
    }
    switch mode {
    case 0:
        logicalRule.Mode = C.LogicalTypeAnd
    case 1:
        logicalRule.Mode = C.LogicalTypeOr
    default:
        err = E.New("unknown logical mode: ", mode)
        return
    }
    length, err := rw.ReadUVariant(reader)
    if err != nil {
        return
    }
    logicalRule.Rules = make([]option.HeadlessRule, length)
    for i := uint64(0); i < length; i++ {
        logicalRule.Rules[i], err = readRule(reader, recovery)
        if err != nil {
            err = E.Cause(err, "read logical rule [", i, "]")
            return
        }
    }
    err = binary.Read(reader, binary.BigEndian, &logicalRule.Invert)
    if err != nil {
        return
    }
    return
}

func writeLogicalRule(writer io.Writer, logicalRule option.LogicalHeadlessRule) error {
    err := binary.Write(writer, binary.BigEndian, uint8(1))
    if err != nil {
        return err
    }
    switch logicalRule.Mode {
    case C.LogicalTypeAnd:
        err = binary.Write(writer, binary.BigEndian, uint8(0))
    case C.LogicalTypeOr:
        err = binary.Write(writer, binary.BigEndian, uint8(1))
    default:
        panic("unknown logical mode: " + logicalRule.Mode)
    }
    if err != nil {
        return err
    }
    err = rw.WriteUVariant(writer, uint64(len(logicalRule.Rules)))
    if err != nil {
        return err
    }
    for _, rule := range logicalRule.Rules {
        err = writeRule(writer, rule)
        if err != nil {
            return err
        }
    }
    err = binary.Write(writer, binary.BigEndian, logicalRule.Invert)
    if err != nil {
        return err
    }
    return nil
}
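Taken together, `Write` compiles a plain rule set into the SRS container (magic bytes, a version byte, then zlib-compressed rules) and `Read` reverses it. A minimal round-trip sketch; the exact layout of the `option` structs is assumed from the field names used above and is not part of this diff:

```go
package main

import (
    "bytes"
    "log"

    "github.com/sagernet/sing-box/common/srs"
    C "github.com/sagernet/sing-box/constant"
    "github.com/sagernet/sing-box/option"
)

func main() {
    ruleSet := option.PlainRuleSet{
        Rules: []option.HeadlessRule{
            {
                Type: C.RuleTypeDefault,
                DefaultOptions: option.DefaultHeadlessRule{
                    DomainSuffix: []string{".example.org"},
                },
            },
        },
    }
    var buffer bytes.Buffer
    // Writes magic + version, then the zlib-compressed rule items.
    if err := srs.Write(&buffer, ruleSet); err != nil {
        log.Fatal(err)
    }
    // recovery=false keeps only the compiled matchers; pass true to also
    // recover string representations where the format supports it.
    decoded, err := srs.Read(&buffer, false)
    if err != nil {
        log.Fatal(err)
    }
    log.Println("decoded", len(decoded.Rules), "rule(s)")
}
```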
116 common/srs/ip_set.go Normal file
@ -0,0 +1,116 @@
package srs

import (
    "encoding/binary"
    "io"
    "net/netip"
    "unsafe"

    "github.com/sagernet/sing/common/rw"

    "go4.org/netipx"
)

type myIPSet struct {
    rr []myIPRange
}

type myIPRange struct {
    from netip.Addr
    to   netip.Addr
}

func readIPSet(reader io.Reader) (*netipx.IPSet, error) {
    var version uint8
    err := binary.Read(reader, binary.BigEndian, &version)
    if err != nil {
        return nil, err
    }
    var length uint64
    err = binary.Read(reader, binary.BigEndian, &length)
    if err != nil {
        return nil, err
    }
    mySet := &myIPSet{
        rr: make([]myIPRange, length),
    }
    for i := uint64(0); i < length; i++ {
        var (
            fromLen  uint64
            toLen    uint64
            fromAddr netip.Addr
            toAddr   netip.Addr
        )
        fromLen, err = rw.ReadUVariant(reader)
        if err != nil {
            return nil, err
        }
        fromBytes := make([]byte, fromLen)
        _, err = io.ReadFull(reader, fromBytes)
        if err != nil {
            return nil, err
        }
        err = fromAddr.UnmarshalBinary(fromBytes)
        if err != nil {
            return nil, err
        }
        toLen, err = rw.ReadUVariant(reader)
        if err != nil {
            return nil, err
        }
        toBytes := make([]byte, toLen)
        _, err = io.ReadFull(reader, toBytes)
        if err != nil {
            return nil, err
        }
        err = toAddr.UnmarshalBinary(toBytes)
        if err != nil {
            return nil, err
        }
        mySet.rr[i] = myIPRange{fromAddr, toAddr}
    }
    return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
}

func writeIPSet(writer io.Writer, set *netipx.IPSet) error {
    err := binary.Write(writer, binary.BigEndian, uint8(1))
    if err != nil {
        return err
    }
    mySet := (*myIPSet)(unsafe.Pointer(set))
    err = binary.Write(writer, binary.BigEndian, uint64(len(mySet.rr)))
    if err != nil {
        return err
    }
    for _, rr := range mySet.rr {
        var (
            fromBinary []byte
            toBinary   []byte
        )
        fromBinary, err = rr.from.MarshalBinary()
        if err != nil {
            return err
        }
        err = rw.WriteUVariant(writer, uint64(len(fromBinary)))
        if err != nil {
            return err
        }
        _, err = writer.Write(fromBinary)
        if err != nil {
            return err
        }
        toBinary, err = rr.to.MarshalBinary()
        if err != nil {
            return err
        }
        err = rw.WriteUVariant(writer, uint64(len(toBinary)))
        if err != nil {
            return err
        }
        _, err = writer.Write(toBinary)
        if err != nil {
            return err
        }
    }
    return nil
}
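`myIPSet` and `myIPRange` shadow the unexported fields of `netipx.IPSet`, and the `unsafe.Pointer` casts rely on the two layouts staying identical. A hypothetical in-package round-trip test for the two helpers above; the test file itself is not part of this diff:

```go
// common/srs/ip_set_test.go (hypothetical)
package srs

import (
    "bytes"
    "net/netip"
    "testing"

    "go4.org/netipx"
)

func TestIPSetRoundTrip(t *testing.T) {
    var builder netipx.IPSetBuilder
    builder.AddPrefix(netip.MustParsePrefix("10.0.0.0/8"))
    set, err := builder.IPSet()
    if err != nil {
        t.Fatal(err)
    }
    var buffer bytes.Buffer
    if err := writeIPSet(&buffer, set); err != nil {
        t.Fatal(err)
    }
    decoded, err := readIPSet(&buffer)
    if err != nil {
        t.Fatal(err)
    }
    // The decoded set should still cover addresses from the original range.
    if !decoded.Contains(netip.MustParseAddr("10.1.2.3")) {
        t.Fatal("decoded set does not contain an address from the original range")
    }
}
```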
31 common/taskmonitor/monitor.go Normal file
@ -0,0 +1,31 @@
package taskmonitor

import (
    "time"

    F "github.com/sagernet/sing/common/format"
    "github.com/sagernet/sing/common/logger"
)

type Monitor struct {
    logger  logger.Logger
    timeout time.Duration
    timer   *time.Timer
}

func New(logger logger.Logger, timeout time.Duration) *Monitor {
    return &Monitor{
        logger:  logger,
        timeout: timeout,
    }
}

func (m *Monitor) Start(taskName ...any) {
    m.timer = time.AfterFunc(m.timeout, func() {
        m.logger.Warn(F.ToString(taskName...), " take too much time to finish!")
    })
}

func (m *Monitor) Finish() {
    m.timer.Stop()
}
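The monitor only logs a warning if a wrapped step outlives the timeout; it does not cancel the task. A usage sketch; the no-op logger from the sing `logger` package is assumed here purely to keep the example self-contained:

```go
package main

import (
    "time"

    "github.com/sagernet/sing-box/common/taskmonitor"
    "github.com/sagernet/sing/common/logger"
)

func main() {
    // Warns only if the wrapped step takes longer than the timeout.
    monitor := taskmonitor.New(logger.NOP(), 2*time.Second)
    monitor.Start("initialize inbound/", "tun-in")
    time.Sleep(500 * time.Millisecond) // stand-in for real startup work
    monitor.Finish()
}
```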
@ -9,3 +9,11 @@ const (
     LogicalTypeAnd = "and"
     LogicalTypeOr  = "or"
 )
+
+const (
+    RuleSetTypeLocal    = "local"
+    RuleSetTypeRemote   = "remote"
+    RuleSetVersion1     = 1
+    RuleSetFormatSource = "source"
+    RuleSetFormatBinary = "binary"
+)
@ -9,5 +9,9 @@ const (
     QUICTimeout = 30 * time.Second
     STUNTimeout = 15 * time.Second
     UDPTimeout  = 5 * time.Minute
-    DefaultURLTestInterval = 1 * time.Minute
+    DefaultURLTestInterval    = 3 * time.Minute
+    DefaultURLTestIdleTimeout = 30 * time.Minute
+    DefaultStartTimeout       = 10 * time.Second
+    DefaultStopTimeout        = 5 * time.Second
+    DefaultStopFatalTimeout   = 10 * time.Second
 )
@ -2,16 +2,106 @@
 icon: material/alert-decagram
 ---

-# ChangeLog
+#### 1.8.0-alpha.11
+
+* Fixes and improvements
+
+#### 1.8.0-alpha.10
+
+* Add `idle_timeout` for URLTest outbound **1**
+* Fixes and improvements
+
+**1**:
+
+When URLTest is idle for a certain period of time, the scheduled delay test will be paused.

 #### 1.7.2

 * Fixes and improvements

+#### 1.8.0-alpha.8
+
+* Add context to JSON decode error message **1**
+* Reject internal fake-ip queries **2**
+* Fixes and improvements
+
+**1**:
+
+JSON parse errors will now include the current key path.
+Only takes effect when compiled with Go 1.21+.
+
+**2**:
+
+All internal DNS queries now skip DNS rules with `server` type `fakeip`,
+and the default DNS server can no longer be `fakeip`.
+
+This change is intended to break incorrect usage and essentially requires no action.
+
+#### 1.8.0-alpha.7
+
+* Fixes and improvements
+
 #### 1.7.1

 * Fixes and improvements

+#### 1.8.0-alpha.6
+
+* Fix rule-set matching logic **1**
+* Fixes and improvements
+
+**1**:
+
+Now the rules in the `rule_set` rule item can be logically considered to be merged into the rule using rule sets,
+rather than completely following the AND logic.
+
+#### 1.8.0-alpha.5
+
+* Parallel rule-set initialization
+* Independent `source_ip_is_private` and `ip_is_private` rules **1**
+
+**1**:
+
+The `private` GeoIP country never existed and was actually implemented inside V2Ray.
+Since GeoIP was deprecated, we made this rule independent, see [Migration](/migration/#migrate-geoip-to-rule-sets).
+
+#### 1.8.0-alpha.1
+
+* Migrate cache file from Clash API to independent options **1**
+* Introducing [Rule Set](/configuration/rule-set) **2**
+* Add `sing-box geoip`, `sing-box geosite` and `sing-box rule-set` commands **3**
+* Allow nested logical rules **4**
+
+**1**:
+
+See [Cache File](/configuration/experimental/cache-file) and
+[Migration](/migration/#migrate-cache-file-from-clash-api-to-independent-options).
+
+**2**:
+
+Rule set is independent collections of rules that can be compiled into binaries to improve performance.
+Compared to legacy GeoIP and Geosite resources,
+it can include more types of rules, load faster,
+use less memory, and update automatically.
+
+See [Route#rule_set](/configuration/route/#rule_set),
+[Route Rule](/configuration/route/rule),
+[DNS Rule](/configuration/dns/rule),
+[Rule Set](/configuration/rule-set),
+[Source Format](/configuration/rule-set/source-format) and
+[Headless Rule](/configuration/rule-set/headless-rule).
+
+For GEO resources migration, see [Migrate GeoIP to rule sets](/migration/#migrate-geoip-to-rule-sets) and
+[Migrate Geosite to rule sets](/migration/#migrate-geosite-to-rule-sets).
+
+**3**:
+
+New commands manage GeoIP, Geosite and rule set resources, and help you migrate GEO resources to rule sets.
+
+**4**:
+
+Logical rules in route rules, DNS rules, and the new headless rule now allow nesting of logical rules.
+
 #### 1.7.0

 * Fixes and improvements
@ -150,11 +240,13 @@ Only supported in graphical clients on Android and iOS.

 **1**:

-Starting in 1.7.0, multiplexing support is no longer enabled by default and needs to be turned on explicitly in inbound options.
+Starting in 1.7.0, multiplexing support is no longer enabled by default and needs to be turned on explicitly in inbound
+options.

 **2**

-Hysteria Brutal Congestion Control Algorithm in TCP. A kernel module needs to be installed on the Linux server, see [TCP Brutal](/configuration/shared/tcp-brutal) for details.
+Hysteria Brutal Congestion Control Algorithm in TCP. A kernel module needs to be installed on the Linux server,
+see [TCP Brutal](/configuration/shared/tcp-brutal) for details.

 #### 1.7.0-alpha.3

@ -221,8 +313,8 @@ When `auto_route` is enabled and `strict_route` is disabled, the device can now

 **2**:

-Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High Sierra, 10.14 Mojave.
+Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High
+Sierra, 10.14 Mojave.

 #### 1.6.0-rc.4

@ -235,7 +327,8 @@ Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008

 **1**:

-Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High Sierra, 10.14 Mojave.
+Built using Go 1.20, the last version that will run on Windows 7, 8, Server 2008, Server 2012 and macOS 10.13 High
+Sierra, 10.14 Mojave.

 #### 1.6.0-beta.4

@ -1,3 +1,14 @@
+---
+icon: material/alert-decagram
+---
+
+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-plus: [source_ip_is_private](#source_ip_is_private)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
 ### Structure

 ```json
@ -46,6 +57,7 @@
       "10.0.0.0/24",
       "192.168.0.1"
     ],
+    "source_ip_is_private": false,
     "source_port": [
       12345
     ],
@ -85,6 +97,10 @@
     "wifi_bssid": [
       "00:00:00:00:00:00"
     ],
+    "rule_set": [
+      "geoip-cn",
+      "geosite-cn"
+    ],
     "invert": false,
     "outbound": [
       "direct"
@ -166,15 +182,29 @@ Match domain using regular expression.

 #### geosite

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
+
 Match geosite.

 #### source_geoip

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
+
 Match source geoip.

 #### source_ip_cidr

-Match source ip cidr.
+Match source IP CIDR.
+
+#### source_ip_is_private
+
+!!! question "Since sing-box 1.8.0"
+
+Match non-public source IP.

 #### source_port

@ -250,6 +280,12 @@ Match WiFi SSID.

 Match WiFi BSSID.

+#### rule_set
+
+!!! question "Since sing-box 1.8.0"
+
+Match [Rule Set](/configuration/route/#rule_set).
+
 #### invert

 Invert match result.
@ -286,4 +322,4 @@ Rewrite TTL in DNS responses.

 #### rules

-Included default rules.
+Included rules.
@ -1,3 +1,14 @@
+---
+icon: material/alert-decagram
+---
+
+!!! quote "sing-box 1.8.0 中的更改"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-plus: [source_ip_is_private](#source_ip_is_private)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
 ### 结构

 ```json
@ -45,6 +56,7 @@
     "source_ip_cidr": [
       "10.0.0.0/24"
     ],
+    "source_ip_is_private": false,
     "source_port": [
       12345
     ],
@ -84,6 +96,10 @@
     "wifi_bssid": [
       "00:00:00:00:00:00"
     ],
+    "rule_set": [
+      "geoip-cn",
+      "geosite-cn"
+    ],
     "invert": false,
     "outbound": [
       "direct"
@ -163,16 +179,30 @@ DNS 查询类型。值可以为整数或者类型名称字符串。

 #### geosite

-匹配 GeoSite。
+!!! failure "已在 sing-box 1.8.0 废弃"
+
+    Geosite 已废弃且可能在不久的将来移除,参阅 [迁移指南](/zh/migration/#geosite)。
+
+匹配 Geosite。

 #### source_geoip

+!!! failure "已在 sing-box 1.8.0 废弃"
+
+    GeoIP 已废弃且可能在不久的将来移除,参阅 [迁移指南](/zh/migration/#geoip)。
+
 匹配源 GeoIP。

 #### source_ip_cidr

 匹配源 IP CIDR。

+#### source_ip_is_private
+
+!!! question "自 sing-box 1.8.0 起"
+
+匹配非公开源 IP。
+
 #### source_port

 匹配源端口。
@ -245,6 +275,12 @@ DNS 查询类型。值可以为整数或者类型名称字符串。

 匹配 WiFi BSSID。

+#### rule_set
+
+!!! question "自 sing-box 1.8.0 起"
+
+匹配[规则集](/zh/configuration/route/#rule_set)。
+
 #### invert

 反选匹配结果。
@ -281,4 +317,4 @@ DNS 查询类型。值可以为整数或者类型名称字符串。

 #### rules

-包括的默认规则。
+包括的规则。
@ -45,20 +45,12 @@ The address of the dns server.

 !!! warning ""

-    To ensure that system DNS is in effect, rather than Go's built-in default resolver, enable CGO at compile time.
+    To ensure that Android system DNS is in effect, rather than Go's built-in default resolver, enable CGO at compile time.

-!!! warning ""
-
-    QUIC and HTTP3 transport is not included by default, see [Installation](./#installation).
-
 !!! info ""

     the RCode transport is often used to block queries. Use with rules and the `disable_cache` rule option.

-!!! warning ""
-
-    DHCP transport is not included by default, see [Installation](./#installation).
-
 | RCode             | Description            |
 |-------------------|------------------------|
 | `success`         | `No error`             |
@ -45,20 +45,12 @@ DNS 服务器的地址。

 !!! warning ""

-    为了确保系统 DNS 生效,而不是 Go 的内置默认解析器,请在编译时启用 CGO。
+    为了确保 Android 系统 DNS 生效,而不是 Go 的内置默认解析器,请在编译时启用 CGO。

-!!! warning ""
-
-    默认安装不包含 QUIC 和 HTTP3 传输层,请参阅 [安装](/zh/#_2)。
-
 !!! info ""

     RCode 传输层传输层常用于屏蔽请求. 与 DNS 规则和 `disable_cache` 规则选项一起使用。

-!!! warning ""
-
-    默认安装不包含 DHCP 传输层,请参阅 [安装](/zh/#_2)。
-
 | RCode             | 描述     |
 |-------------------|----------|
 | `success`         | `无错误` |
34 docs/configuration/experimental/cache-file.md Normal file
@ -0,0 +1,34 @@
---
icon: material/new-box
---

!!! question "Since sing-box 1.8.0"

### Structure

```json
{
  "enabled": true,
  "path": "",
  "cache_id": "",
  "store_fakeip": false
}
```

### Fields

#### enabled

Enable cache file.

#### path

Path to the cache file.

`cache.db` will be used if empty.

#### cache_id

Identifier in cache file.

If not empty, configuration specified data will use a separate store keyed by it.
32 docs/configuration/experimental/cache-file.zh.md Normal file
@ -0,0 +1,32 @@
---
icon: material/new-box
---

!!! question "自 sing-box 1.8.0 起"

### 结构

```json
{
  "enabled": true,
  "path": "",
  "cache_id": "",
  "store_fakeip": false
}
```

### 字段

#### enabled

启用缓存文件。

#### path

缓存文件路径,默认使用`cache.db`。

#### cache_id

缓存文件中的标识符。

如果不为空,配置特定的数据将使用由其键控的单独存储。
114 docs/configuration/experimental/clash-api.md Normal file
@ -0,0 +1,114 @@
---
icon: material/alert-decagram
---

!!! quote "Changes in sing-box 1.8.0"

    :material-delete-alert: [store_mode](#store_mode)
    :material-delete-alert: [store_selected](#store_selected)
    :material-delete-alert: [store_fakeip](#store_fakeip)
    :material-delete-alert: [cache_file](#cache_file)
    :material-delete-alert: [cache_id](#cache_id)

### Structure

```json
{
  "external_controller": "127.0.0.1:9090",
  "external_ui": "",
  "external_ui_download_url": "",
  "external_ui_download_detour": "",
  "secret": "",
  "default_mode": "",

  // Deprecated

  "store_mode": false,
  "store_selected": false,
  "store_fakeip": false,
  "cache_file": "",
  "cache_id": ""
}
```

### Fields

#### external_controller

RESTful web API listening address. Clash API will be disabled if empty.

#### external_ui

A relative path to the configuration directory or an absolute path to a
directory in which you put some static web resource. sing-box will then
serve it at `http://{{external-controller}}/ui`.

#### external_ui_download_url

ZIP download URL for the external UI, will be used if the specified `external_ui` directory is empty.

`https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip` will be used if empty.

#### external_ui_download_detour

The tag of the outbound to download the external UI.

Default outbound will be used if empty.

#### secret

Secret for the RESTful API (optional)
Authenticate by spedifying HTTP header `Authorization: Bearer ${secret}`
ALWAYS set a secret if RESTful API is listening on 0.0.0.0

#### default_mode

Default mode in clash, `Rule` will be used if empty.

This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.

#### store_mode

!!! failure "Deprecated in sing-box 1.8.0"

    `store_mode` is deprecated in Clash API and enabled by default if `cache_file.enabled`.

Store Clash mode in cache file.

#### store_selected

!!! failure "Deprecated in sing-box 1.8.0"

    `store_selected` is deprecated in Clash API and enabled by default if `cache_file.enabled`.

!!! note ""

    The tag must be set for target outbounds.

Store selected outbound for the `Selector` outbound in cache file.

#### store_fakeip

!!! failure "Deprecated in sing-box 1.8.0"

    `store_selected` is deprecated in Clash API and migrated to `cache_file.store_fakeip`.

Store fakeip in cache file.

#### cache_file

!!! failure "Deprecated in sing-box 1.8.0"

    `cache_file` is deprecated in Clash API and migrated to `cache_file.enabled` and `cache_file.path`.

Cache file path, `cache.db` will be used if empty.

#### cache_id

!!! failure "Deprecated in sing-box 1.8.0"

    `cache_id` is deprecated in Clash API and migrated to `cache_file.cache_id`.

Identifier in cache file.

If not empty, configuration specified data will use a separate store keyed by it.
112 docs/configuration/experimental/clash-api.zh.md Normal file
@ -0,0 +1,112 @@
---
icon: material/alert-decagram
---

!!! quote "sing-box 1.8.0 中的更改"

    :material-delete-alert: [store_mode](#store_mode)
    :material-delete-alert: [store_selected](#store_selected)
    :material-delete-alert: [store_fakeip](#store_fakeip)
    :material-delete-alert: [cache_file](#cache_file)
    :material-delete-alert: [cache_id](#cache_id)

### 结构

```json
{
  "external_controller": "127.0.0.1:9090",
  "external_ui": "",
  "external_ui_download_url": "",
  "external_ui_download_detour": "",
  "secret": "",
  "default_mode": "",

  // Deprecated

  "store_mode": false,
  "store_selected": false,
  "store_fakeip": false,
  "cache_file": "",
  "cache_id": ""
}
```

### Fields

#### external_controller

RESTful web API 监听地址。如果为空,则禁用 Clash API。

#### external_ui

到静态网页资源目录的相对路径或绝对路径。sing-box 会在 `http://{{external-controller}}/ui` 下提供它。

#### external_ui_download_url

静态网页资源的 ZIP 下载 URL,如果指定的 `external_ui` 目录为空,将使用。

默认使用 `https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip`。

#### external_ui_download_detour

用于下载静态网页资源的出站的标签。

如果为空,将使用默认出站。

#### secret

RESTful API 的密钥(可选)
通过指定 HTTP 标头 `Authorization: Bearer ${secret}` 进行身份验证
如果 RESTful API 正在监听 0.0.0.0,请始终设置一个密钥。

#### default_mode

Clash 中的默认模式,默认使用 `Rule`。

此设置没有直接影响,但可以通过 `clash_mode` 规则项在路由和 DNS 规则中使用。

#### store_mode

!!! failure "已在 sing-box 1.8.0 废弃"

    `store_mode` 已在 Clash API 中废弃,且默认启用当 `cache_file.enabled`。

将 Clash 模式存储在缓存文件中。

#### store_selected

!!! failure "已在 sing-box 1.8.0 废弃"

    `store_selected` 已在 Clash API 中废弃,且默认启用当 `cache_file.enabled`。

!!! note ""

    必须为目标出站设置标签。

将 `Selector` 中出站的选定的目标出站存储在缓存文件中。

#### store_fakeip

!!! failure "已在 sing-box 1.8.0 废弃"

    `store_selected` 已在 Clash API 中废弃,且已迁移到 `cache_file.store_fakeip`。

将 fakeip 存储在缓存文件中。

#### cache_file

!!! failure "已在 sing-box 1.8.0 废弃"

    `cache_file` 已在 Clash API 中废弃,且已迁移到 `cache_file.enabled` 和 `cache_file.path`。

缓存文件路径,默认使用`cache.db`。

#### cache_id

!!! failure "已在 sing-box 1.8.0 废弃"

    `cache_id` 已在 Clash API 中废弃,且已迁移到 `cache_file.cache_id`。

缓存 ID。

如果不为空,配置特定的数据将使用由其键控的单独存储。
@ -1,139 +1,30 @@
+---
+icon: material/alert-decagram
+---
+
 # Experimental

+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [cache_file](#cache_file)
+    :material-alert-decagram: [clash_api](#clash_api)
+
 ### Structure

 ```json
 {
   "experimental": {
-    "clash_api": {
-      "external_controller": "127.0.0.1:9090",
-      "external_ui": "",
-      "external_ui_download_url": "",
-      "external_ui_download_detour": "",
-      "secret": "",
-      "default_mode": "",
-      "store_mode": false,
-      "store_selected": false,
-      "store_fakeip": false,
-      "cache_file": "",
-      "cache_id": ""
-    },
-    "v2ray_api": {
-      "listen": "127.0.0.1:8080",
-      "stats": {
-        "enabled": true,
-        "inbounds": [
-          "socks-in"
-        ],
-        "outbounds": [
-          "proxy",
-          "direct"
-        ],
-        "users": [
-          "sekai"
-        ]
-      }
-    }
+    "cache_file": {},
+    "clash_api": {},
+    "v2ray_api": {}
   }
 }
 ```

-!!! note ""
-
-    Traffic statistics and connection management can degrade performance.
-
-### Clash API Fields
-
-!!! quote ""
-
-    Clash API is not included by default, see [Installation](./#installation).
-
-#### external_controller
-
-RESTful web API listening address. Clash API will be disabled if empty.
-
-#### external_ui
-
-A relative path to the configuration directory or an absolute path to a
-directory in which you put some static web resource. sing-box will then
-serve it at `http://{{external-controller}}/ui`.
-
-#### external_ui_download_url
-
-ZIP download URL for the external UI, will be used if the specified `external_ui` directory is empty.
-
-`https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip` will be used if empty.
-
-#### external_ui_download_detour
-
-The tag of the outbound to download the external UI.
-
-Default outbound will be used if empty.
-
-#### secret
-
-Secret for the RESTful API (optional)
-Authenticate by spedifying HTTP header `Authorization: Bearer ${secret}`
-ALWAYS set a secret if RESTful API is listening on 0.0.0.0
-
-#### default_mode
-
-Default mode in clash, `Rule` will be used if empty.
-
-This setting has no direct effect, but can be used in routing and DNS rules via the `clash_mode` rule item.
-
-#### store_mode
-
-Store Clash mode in cache file.
-
-#### store_selected
-
-!!! note ""
-
-    The tag must be set for target outbounds.
-
-Store selected outbound for the `Selector` outbound in cache file.
-
-#### store_fakeip
-
-Store fakeip in cache file.
-
-#### cache_file
-
-Cache file path, `cache.db` will be used if empty.
-
-#### cache_id
-
-Cache ID.
-
-If not empty, `store_selected` will use a separate store keyed by it.
-
-### V2Ray API Fields
-
-!!! quote ""
-
-    V2Ray API is not included by default, see [Installation](./#installation).
-
-#### listen
-
-gRPC API listening address. V2Ray API will be disabled if empty.
-
-#### stats
-
-Traffic statistics service settings.
-
-#### stats.enabled
-
-Enable statistics service.
-
-#### stats.inbounds
-
-Inbound list to count traffic.
-
-#### stats.outbounds
-
-Outbound list to count traffic.
-
-#### stats.users
-
-User list to count traffic.
+### Fields
+
+| Key          | Format                     |
+|--------------|----------------------------|
+| `cache_file` | [Cache File](./cache-file) |
+| `clash_api`  | [Clash API](./clash-api)   |
+| `v2ray_api`  | [V2Ray API](./v2ray-api)   |
@ -1,137 +1,30 @@
+---
+icon: material/alert-decagram
+---
+
 # 实验性

+!!! quote "sing-box 1.8.0 中的更改"
+
+    :material-plus: [cache_file](#cache_file)
+    :material-alert-decagram: [clash_api](#clash_api)
+
 ### 结构

 ```json
 {
   "experimental": {
-    "clash_api": {
-      "external_controller": "127.0.0.1:9090",
-      "external_ui": "",
-      "external_ui_download_url": "",
-      "external_ui_download_detour": "",
-      "secret": "",
-      "default_mode": "",
-      "store_mode": false,
-      "store_selected": false,
-      "store_fakeip": false,
-      "cache_file": "",
-      "cache_id": ""
-    },
-    "v2ray_api": {
-      "listen": "127.0.0.1:8080",
-      "stats": {
-        "enabled": true,
-        "inbounds": [
-          "socks-in"
-        ],
-        "outbounds": [
-          "proxy",
-          "direct"
-        ],
-        "users": [
-          "sekai"
-        ]
-      }
-    }
+    "cache_file": {},
+    "clash_api": {},
+    "v2ray_api": {}
   }
 }
 ```

-!!! note ""
-
-    流量统计和连接管理会降低性能。
-
-### Clash API 字段
-
-!!! quote ""
-
-    默认安装不包含 Clash API,参阅 [安装](/zh/#_2)。
-
-#### external_controller
-
-RESTful web API 监听地址。如果为空,则禁用 Clash API。
-
-#### external_ui
-
-到静态网页资源目录的相对路径或绝对路径。sing-box 会在 `http://{{external-controller}}/ui` 下提供它。
-
-#### external_ui_download_url
-
-静态网页资源的 ZIP 下载 URL,如果指定的 `external_ui` 目录为空,将使用。
-
-默认使用 `https://github.com/MetaCubeX/Yacd-meta/archive/gh-pages.zip`。
-
-#### external_ui_download_detour
-
-用于下载静态网页资源的出站的标签。
-
-如果为空,将使用默认出站。
-
-#### secret
-
-RESTful API 的密钥(可选)
-通过指定 HTTP 标头 `Authorization: Bearer ${secret}` 进行身份验证
-如果 RESTful API 正在监听 0.0.0.0,请始终设置一个密钥。
-
-#### default_mode
-
-Clash 中的默认模式,默认使用 `Rule`。
-
-此设置没有直接影响,但可以通过 `clash_mode` 规则项在路由和 DNS 规则中使用。
-
-#### store_mode
-
-将 Clash 模式存储在缓存文件中。
-
-#### store_selected
-
-!!! note ""
-
-    必须为目标出站设置标签。
-
-将 `Selector` 中出站的选定的目标出站存储在缓存文件中。
-
-#### store_fakeip
-
-将 fakeip 存储在缓存文件中。
-
-#### cache_file
-
-缓存文件路径,默认使用`cache.db`。
-
-#### cache_id
-
-缓存 ID。
-
-如果不为空,`store_selected` 将会使用以此为键的独立存储。
-
-### V2Ray API 字段
-
-!!! quote ""
-
-    默认安装不包含 V2Ray API,参阅 [安装](/zh/#_2)。
-
-#### listen
-
-gRPC API 监听地址。如果为空,则禁用 V2Ray API。
-
-#### stats
-
-流量统计服务设置。
-
-#### stats.enabled
-
-启用统计服务。
-
-#### stats.inbounds
-
-统计流量的入站列表。
-
-#### stats.outbounds
-
-统计流量的出站列表。
-
-#### stats.users
-
-统计流量的用户列表。
+### 字段
+
+| 键           | 格式                     |
+|--------------|--------------------------|
+| `cache_file` | [缓存文件](./cache-file) |
+| `clash_api`  | [Clash API](./clash-api) |
+| `v2ray_api`  | [V2Ray API](./v2ray-api) |
50 docs/configuration/experimental/v2ray-api.md Normal file
@ -0,0 +1,50 @@
!!! quote ""

    V2Ray API is not included by default, see [Installation](/installation/build-from-source/#build-tags).

### Structure

```json
{
  "listen": "127.0.0.1:8080",
  "stats": {
    "enabled": true,
    "inbounds": [
      "socks-in"
    ],
    "outbounds": [
      "proxy",
      "direct"
    ],
    "users": [
      "sekai"
    ]
  }
}
```

### Fields

#### listen

gRPC API listening address. V2Ray API will be disabled if empty.

#### stats

Traffic statistics service settings.

#### stats.enabled

Enable statistics service.

#### stats.inbounds

Inbound list to count traffic.

#### stats.outbounds

Outbound list to count traffic.

#### stats.users

User list to count traffic.
50 docs/configuration/experimental/v2ray-api.zh.md Normal file
@ -0,0 +1,50 @@
!!! quote ""

    默认安装不包含 V2Ray API,参阅 [安装](/zh/installation/build-from-source/#_5)。

### 结构

```json
{
  "listen": "127.0.0.1:8080",
  "stats": {
    "enabled": true,
    "inbounds": [
      "socks-in"
    ],
    "outbounds": [
      "proxy",
      "direct"
    ],
    "users": [
      "sekai"
    ]
  }
}
```

### 字段

#### listen

gRPC API 监听地址。如果为空,则禁用 V2Ray API。

#### stats

流量统计服务设置。

#### stats.enabled

启用统计服务。

#### stats.inbounds

统计流量的入站列表。

#### stats.outbounds

统计流量的出站列表。

#### stats.users

统计流量的用户列表。
@@ -29,10 +29,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by hysteria is not included by default, see [Installation](./#installation).
-
### Listen Fields

See [Listen Fields](/configuration/shared/listen) for details.
@@ -29,10 +29,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria, is not included in the default installation, see [Installation](/zh/#_2).
-
### Listen Fields

See [Listen Fields](/zh/configuration/shared/listen/) for details.
@@ -26,10 +26,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria2 is not included by default, see [Installation](./#installation).
-
!!! warning "Difference from official Hysteria2"

    The official program supports an authentication method called **userpass**,
@@ -26,10 +26,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria2, is not included in the default installation, see [Installation](/zh/#_2).
-
!!! warning "Difference from official Hysteria2"

    The official program supports an authentication method called **userpass**,
@@ -18,10 +18,6 @@
}
```

-!!! warning ""
-
-    HTTP3 transport is not included by default, see [Installation](./#installation).
-
### Listen Fields

See [Listen Fields](/configuration/shared/listen) for details.
@@ -18,10 +18,6 @@
}
```

-!!! warning ""
-
-    The HTTP3 transport is not included in the default installation, see [Installation](/zh/#_2).
-
### Listen Fields

See [Listen Fields](/zh/configuration/shared/listen/) for details.
@@ -22,10 +22,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by TUIC is not included by default, see [Installation](./#installation).
-
### Listen Fields

See [Listen Fields](/configuration/shared/listen) for details.
@@ -22,10 +22,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by TUIC, is not included in the default installation, see [Installation](/zh/#_2).
-
### Listen Fields

See [Listen Fields](/zh/configuration/shared/listen/) for details.
@@ -171,7 +171,7 @@ TCP/IP stack.

!!! warning ""

-    gVisor and LWIP stacks is not included by default, see [Installation](./#installation).
+    LWIP stacks is not included by default, see [Installation](/installation/build-from-source/#build-tags).

#### include_interface
|
@ -167,7 +167,7 @@ TCP/IP 栈。
|
|||||||
|
|
||||||
!!! warning ""
|
!!! warning ""
|
||||||
|
|
||||||
默认安装不包含 gVisor 和 LWIP 栈,请参阅 [安装](/zh/#_2)。
|
默认安装不包含 LWIP 栈,参阅 [安装](/zh/installation/build-from-source/#_5)。
|
||||||
|
|
||||||
#### include_interface
|
#### include_interface
|
||||||
|
|
||||||
|
@@ -24,10 +24,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by hysteria is not included by default, see [Installation](./#installation).
-
### Fields

#### server
@@ -24,10 +24,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria, is not included in the default installation, see [Installation](/zh/#_2).
-
### Fields

#### server
@@ -22,10 +22,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria2 is not included by default, see [Installation](./#installation).
-
!!! warning "Difference from official Hysteria2"

    The official Hysteria2 supports an authentication method called **userpass**,
@@ -22,10 +22,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by Hysteria2, is not included in the default installation, see [Installation](/zh/#_2).
-
!!! warning "Difference from official Hysteria2"

    The official program supports an authentication method called **userpass**,
@@ -26,7 +26,6 @@
| `trojan`       | [Trojan](./trojan)             |
| `wireguard`    | [Wireguard](./wireguard)       |
| `hysteria`     | [Hysteria](./hysteria)         |
-| `shadowsocksr` | [ShadowsocksR](./shadowsocksr) |
| `vless`        | [VLESS](./vless)               |
| `shadowtls`    | [ShadowTLS](./shadowtls)       |
| `tuic`         | [TUIC](./tuic)                 |
@@ -26,7 +26,6 @@
| `trojan`       | [Trojan](./trojan)             |
| `wireguard`    | [Wireguard](./wireguard)       |
| `hysteria`     | [Hysteria](./hysteria)         |
-| `shadowsocksr` | [ShadowsocksR](./shadowsocksr) |
| `vless`        | [VLESS](./vless)               |
| `shadowtls`    | [ShadowTLS](./shadowtls)       |
| `tuic`         | [TUIC](./tuic)                 |
@@ -1,106 +0,0 @@
### Structure

```json
{
  "type": "shadowsocksr",
  "tag": "ssr-out",

  "server": "127.0.0.1",
  "server_port": 1080,
  "method": "aes-128-cfb",
  "password": "8JCsPssfgS8tiRwiMlhARg==",
  "obfs": "plain",
  "obfs_param": "",
  "protocol": "origin",
  "protocol_param": "",
  "network": "udp",

  ... // Dial Fields
}
```

!!! warning ""

    The ShadowsocksR protocol is obsolete and unmaintained. This outbound is provided for compatibility only.

!!! warning ""

    ShadowsocksR is not included by default, see [Installation](./#installation).

### Fields

#### server

==Required==

The server address.

#### server_port

==Required==

The server port.

#### method

==Required==

Encryption methods:

* `aes-128-ctr`
* `aes-192-ctr`
* `aes-256-ctr`
* `aes-128-cfb`
* `aes-192-cfb`
* `aes-256-cfb`
* `rc4-md5`
* `chacha20-ietf`
* `xchacha20`

#### password

==Required==

The shadowsocks password.

#### obfs

The ShadowsocksR obfuscate.

* plain
* http_simple
* http_post
* random_head
* tls1.2_ticket_auth

#### obfs_param

The ShadowsocksR obfuscate parameter.

#### protocol

The ShadowsocksR protocol.

* origin
* verify_sha1
* auth_sha1_v4
* auth_aes128_md5
* auth_aes128_sha1
* auth_chain_a
* auth_chain_b

#### protocol_param

The ShadowsocksR protocol parameter.

#### network

Enabled network

One of `tcp` `udp`.

Both is enabled by default.

### Dial Fields

See [Dial Fields](/configuration/shared/dial) for details.
@@ -1,106 +0,0 @@
### Structure

```json
{
  "type": "shadowsocksr",
  "tag": "ssr-out",

  "server": "127.0.0.1",
  "server_port": 1080,
  "method": "aes-128-cfb",
  "password": "8JCsPssfgS8tiRwiMlhARg==",
  "obfs": "plain",
  "obfs_param": "",
  "protocol": "origin",
  "protocol_param": "",
  "network": "udp",

  ... // Dial Fields
}
```

!!! warning ""

    The ShadowsocksR protocol is obsolete and unmaintained. This outbound is provided for compatibility purposes only.

!!! warning ""

    ShadowsocksR is not included in the default installation, see [Installation](/zh/#_2).

### Fields

#### server

==Required==

The server address.

#### server_port

==Required==

The server port.

#### method

==Required==

Encryption methods:

* `aes-128-ctr`
* `aes-192-ctr`
* `aes-256-ctr`
* `aes-128-cfb`
* `aes-192-cfb`
* `aes-256-cfb`
* `rc4-md5`
* `chacha20-ietf`
* `xchacha20`

#### password

==Required==

The Shadowsocks password.

#### obfs

The ShadowsocksR obfuscation.

* plain
* http_simple
* http_post
* random_head
* tls1.2_ticket_auth

#### obfs_param

The ShadowsocksR obfuscation parameter.

#### protocol

The ShadowsocksR protocol.

* origin
* verify_sha1
* auth_sha1_v4
* auth_aes128_md5
* auth_aes128_sha1
* auth_chain_a
* auth_chain_b

#### protocol_param

The ShadowsocksR protocol parameter.

#### network

Enabled network protocols.

`tcp` or `udp`.

Both are enabled by default.

### Dial Fields

See [Dial Fields](/zh/configuration/shared/dial/) for details.
@@ -18,7 +18,7 @@

!!! info ""

-    Embedded tor is not included by default, see [Installation](./#installation).
+    Embedded Tor is not included by default, see [Installation](/installation/build-from-source/#build-tags).

### Fields
@@ -18,7 +18,7 @@

!!! info ""

-    Embedded Tor is not included in the default installation, see [Installation](/zh/#_2).
+    Embedded Tor is not included in the default installation, see [Installation](/zh/installation/build-from-source/#_5).

### Fields
@@ -21,10 +21,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by TUIC is not included by default, see [Installation](./#installation).
-
### Fields

#### server
@@ -21,10 +21,6 @@
}
```

-!!! warning ""
-
-    QUIC, which is required by TUIC, is not included in the default installation, see [Installation](/zh/#_2).
-
### Fields

#### server
@@ -10,9 +10,10 @@
    "proxy-b",
    "proxy-c"
  ],
-  "url": "https://www.gstatic.com/generate_204",
-  "interval": "1m",
-  "tolerance": 50,
+  "url": "",
+  "interval": "",
+  "tolerance": 0,
+  "idle_timeout": "",
  "interrupt_exist_connections": false
}
```
@@ -31,12 +32,16 @@ The URL to test. `https://www.gstatic.com/generate_204` will be used if empty.

#### interval

-The test interval. `1m` will be used if empty.
+The test interval. `3m` will be used if empty.

#### tolerance

The test tolerance in milliseconds. `50` will be used if empty.

+#### idle_timeout
+
+The idle timeout. `30m` will be used if empty.
+
#### interrupt_exist_connections

Interrupt existing connections when the selected outbound has changed.
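To tie the new `idle_timeout` field to the fields around it, here is a minimal URLTest outbound sketch; the tag and member outbound names are placeholders, and the values simply restate the documented defaults.

```json
{
  "type": "urltest",
  "tag": "auto",
  "outbounds": [
    "proxy-a",
    "proxy-b"
  ],
  "url": "https://www.gstatic.com/generate_204",
  "interval": "3m",
  "tolerance": 50,
  "idle_timeout": "30m",
  "interrupt_exist_connections": false
}
```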
@@ -10,9 +10,10 @@
    "proxy-b",
    "proxy-c"
  ],
-  "url": "https://www.gstatic.com/generate_204",
-  "interval": "1m",
+  "url": "",
+  "interval": "",
  "tolerance": 50,
+  "idle_timeout": "",
  "interrupt_exist_connections": false
}
```
@@ -31,12 +32,16 @@

#### interval

-The test interval. `1m` will be used if empty.
+The test interval. `3m` will be used if empty.

#### tolerance

The test tolerance in milliseconds. `50` will be used if empty.

+#### idle_timeout
+
+The idle timeout. `30m` will be used if empty.
+
#### interrupt_exist_connections

Interrupt existing connections when the selected outbound has changed.
@@ -36,14 +36,6 @@
}
```

-!!! warning ""
-
-    WireGuard is not included by default, see [Installation](./#installation).
-
-!!! warning ""
-
-    gVisor, which is required by the unprivileged WireGuard is not included by default, see [Installation](./#installation).
-
### Fields

#### server
@@ -24,14 +24,6 @@
}
```

-!!! warning ""
-
-    WireGuard is not included in the default installation, see [Installation](/zh/#_2).
-
-!!! warning ""
-
-    gVisor, which is required by unprivileged WireGuard, is not included in the default installation, see [Installation](/zh/#_2).
-
### Fields

#### server
@@ -1,3 +1,11 @@
+---
+icon: material/delete-clock
+---
+
+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
+
### Structure

```json
@@ -1,3 +1,11 @@
+---
+icon: material/delete-clock
+---
+
+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the near future, see [Migration](/zh/migration/#geoip).
+
### Structure

```json
@@ -1,3 +1,11 @@
+---
+icon: material/delete-clock
+---
+
+!!! failure "Deprecated in sing-box 1.8.0"
+
+    Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
+
### Structure

```json
@@ -1,3 +1,11 @@
+---
+icon: material/delete-clock
+---
+
+!!! failure "Deprecated in sing-box 1.8.0"
+
+    Geosite is deprecated and may be removed in the near future, see [Migration](/zh/migration/#geosite).
+
### Structure

```json
@@ -1,5 +1,15 @@
+---
+icon: material/alert-decagram
+---
+
# Route

+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
### Structure

```json
@@ -8,6 +18,7 @@
  "geoip": {},
  "geosite": {},
  "rules": [],
+  "rule_set": [],
  "final": "",
  "auto_detect_interface": false,
  "override_android_vpn": false,
@@ -20,10 +31,19 @@
### Fields

| Key       | Format               |
-|------------|------------------------------------|
+|-----------|----------------------|
| `geoip`   | [GeoIP](./geoip)     |
| `geosite` | [Geosite](./geosite) |
-| `rules`    | List of [Route Rule](./rule)       |
+
+#### rules
+
+List of [Route Rule](./rule)
+
+#### rule_set
+
+!!! question "Since sing-box 1.8.0"
+
+List of [Rule Set](/configuration/rule-set)
+
#### final
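For orientation, a short sketch of how the new `rule_set` list and a rule referencing it fit together; the rule-set tag, URL, and the `direct` outbound are illustrative placeholders.

```json
{
  "route": {
    "rule_set": [
      {
        "type": "remote",
        "tag": "geosite-example",
        "format": "binary",
        "url": "https://example.org/geosite-example.srs",
        "download_detour": "direct"
      }
    ],
    "rules": [
      {
        "rule_set": [
          "geosite-example"
        ],
        "outbound": "direct"
      }
    ]
  }
}
```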
@@ -1,5 +1,15 @@
+---
+icon: material/alert-decagram
+---
+
# Route

+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
### Structure

```json
@@ -7,8 +17,8 @@
"route": {
  "geoip": {},
  "geosite": {},
-  "ip_rules": [],
  "rules": [],
+  "rule_set": [],
  "final": "",
  "auto_detect_interface": false,
  "override_android_vpn": false,
@@ -21,10 +31,20 @@
### Fields

| Key       | Format               |
-|------------|-------------------------|
+|------------|-----------------------------------|
| `geoip`   | [GeoIP](./geoip)     |
-| `geosite` | [GeoSite](./geosite) |
-| `rules`    | List of [Route Rule](./rule) |
+| `geosite` | [Geosite](./geosite) |
+
+#### rules
+
+List of [Route Rule](./rule).
+
+#### rule_set
+
+!!! question "Since sing-box 1.8.0"
+
+List of [Rule Set](/configuration/rule-set).
+
#### final
@@ -1,3 +1,17 @@
+---
+icon: material/alert-decagram
+---
+
+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-plus: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
+    :material-plus: [source_ip_is_private](#source_ip_is_private)
+    :material-plus: [ip_is_private](#ip_is_private)
+    :material-delete-clock: [source_geoip](#source_geoip)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
### Structure

```json
@@ -46,10 +60,12 @@
        "10.0.0.0/24",
        "192.168.0.1"
      ],
+      "source_ip_is_private": false,
      "ip_cidr": [
        "10.0.0.0/24",
        "192.168.0.1"
      ],
+      "ip_is_private": false,
      "source_port": [
        12345
      ],
@@ -89,6 +105,10 @@
      "wifi_bssid": [
        "00:00:00:00:00:00"
      ],
+      "rule_set": [
+        "geoip-cn",
+        "geosite-cn"
+      ],
      "invert": false,
      "outbound": "direct"
    },
@@ -160,23 +180,47 @@ Match domain using regular expression.

#### geosite

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    Geosite is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geosite-to-rule-sets).
+
Match geosite.

#### source_geoip

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
+
Match source geoip.

#### geoip

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the future, check [Migration](/migration/#migrate-geoip-to-rule-sets).
+
Match geoip.

#### source_ip_cidr

-Match source ip cidr.
+Match source IP CIDR.

+#### ip_is_private
+
+!!! question "Since sing-box 1.8.0"
+
+Match non-public IP.
+
#### ip_cidr

-Match ip cidr.
+Match IP CIDR.

+#### source_ip_is_private
+
+!!! question "Since sing-box 1.8.0"
+
+Match non-public source IP.
+
#### source_port
@@ -250,6 +294,18 @@ Match WiFi SSID.

Match WiFi BSSID.

+#### rule_set
+
+!!! question "Since sing-box 1.8.0"
+
+Match [Rule Set](/configuration/route/#rule_set).
+
+#### rule_set_ipcidr_match_source
+
+!!! question "Since sing-box 1.8.0"
+
+Make `ipcidr` in rule sets match the source IP.
+
#### invert

Invert match result.
@@ -276,4 +332,4 @@

==Required==

-Included default rules.
+Included rules.
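A compact sketch that combines the matchers introduced above in two route rules; the `geoip-cn`/`geosite-cn` rule-set tags and the `direct` outbound are assumptions used for illustration.

```json
{
  "route": {
    "rules": [
      {
        "ip_is_private": true,
        "outbound": "direct"
      },
      {
        "rule_set": [
          "geoip-cn",
          "geosite-cn"
        ],
        "rule_set_ipcidr_match_source": false,
        "outbound": "direct"
      }
    ]
  }
}
```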
@@ -1,3 +1,17 @@
+---
+icon: material/alert-decagram
+---
+
+!!! quote "Changes in sing-box 1.8.0"
+
+    :material-plus: [rule_set](#rule_set)
+    :material-plus: [rule_set_ipcidr_match_source](#rule_set_ipcidr_match_source)
+    :material-plus: [source_ip_is_private](#source_ip_is_private)
+    :material-plus: [ip_is_private](#ip_is_private)
+    :material-delete-clock: [source_geoip](#source_geoip)
+    :material-delete-clock: [geoip](#geoip)
+    :material-delete-clock: [geosite](#geosite)
+
### Structure

```json
@@ -45,9 +59,11 @@
      "source_ip_cidr": [
        "10.0.0.0/24"
      ],
+      "source_ip_is_private": false,
      "ip_cidr": [
        "10.0.0.0/24"
      ],
+      "ip_is_private": false,
      "source_port": [
        12345
      ],
@@ -87,6 +103,10 @@
      "wifi_bssid": [
        "00:00:00:00:00:00"
      ],
+      "rule_set": [
+        "geoip-cn",
+        "geosite-cn"
+      ],
      "invert": false,
      "outbound": "direct"
    },
@@ -158,24 +178,48 @@

#### geosite

-Match GeoSite.
+!!! failure "Deprecated in sing-box 1.8.0"
+
+    Geosite is deprecated and may be removed in the near future, see [Migration](/zh/migration/#geosite).
+
+Match Geosite.

#### source_geoip

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the near future, see [Migration](/zh/migration/#geoip).
+
Match source GeoIP.

#### geoip

+!!! failure "Deprecated in sing-box 1.8.0"
+
+    GeoIP is deprecated and may be removed in the near future, see [Migration](/zh/migration/#geoip).
+
Match GeoIP.

#### source_ip_cidr

Match source IP CIDR.

+#### source_ip_is_private
+
+!!! question "Since sing-box 1.8.0"
+
+Match non-public source IP.
+
#### ip_cidr

Match IP CIDR.

+#### ip_is_private
+
+!!! question "Since sing-box 1.8.0"
+
+Match non-public IP.
+
#### source_port

Match source port.
@@ -248,6 +292,18 @@

Match WiFi BSSID.

+#### rule_set
+
+!!! question "Since sing-box 1.8.0"
+
+Match [Rule Set](/zh/configuration/route/#rule_set).
+
+#### rule_set_ipcidr_match_source
+
+!!! question "Since sing-box 1.8.0"
+
+Make `ipcidr` rules in rule sets match the source IP.
+
#### invert

Invert match result.
@@ -274,4 +330,4 @@

==Required==

-Included default rules.
+Included rules.
docs/configuration/rule-set/headless-rule.md (new file, 207 lines)
@@ -0,0 +1,207 @@
---
icon: material/new-box
---

### Structure

!!! question "Since sing-box 1.8.0"

```json
{
  "rules": [
    {
      "query_type": [
        "A",
        "HTTPS",
        32768
      ],
      "network": [
        "tcp"
      ],
      "domain": [
        "test.com"
      ],
      "domain_suffix": [
        ".cn"
      ],
      "domain_keyword": [
        "test"
      ],
      "domain_regex": [
        "^stun\\..+"
      ],
      "source_ip_cidr": [
        "10.0.0.0/24",
        "192.168.0.1"
      ],
      "ip_cidr": [
        "10.0.0.0/24",
        "192.168.0.1"
      ],
      "source_port": [
        12345
      ],
      "source_port_range": [
        "1000:2000",
        ":3000",
        "4000:"
      ],
      "port": [
        80,
        443
      ],
      "port_range": [
        "1000:2000",
        ":3000",
        "4000:"
      ],
      "process_name": [
        "curl"
      ],
      "process_path": [
        "/usr/bin/curl"
      ],
      "package_name": [
        "com.termux"
      ],
      "wifi_ssid": [
        "My WIFI"
      ],
      "wifi_bssid": [
        "00:00:00:00:00:00"
      ],
      "invert": false
    },
    {
      "type": "logical",
      "mode": "and",
      "rules": [],
      "invert": false
    }
  ]
}
```

!!! note ""

    You can ignore the JSON Array [] tag when the content is only one item

### Default Fields

!!! note ""

    The default rule uses the following matching logic:
    (`domain` || `domain_suffix` || `domain_keyword` || `domain_regex` || `ip_cidr`) &&
    (`port` || `port_range`) &&
    (`source_port` || `source_port_range`) &&
    `other fields`

#### query_type

DNS query type. Values can be integers or type name strings.

#### network

`tcp` or `udp`.

#### domain

Match full domain.

#### domain_suffix

Match domain suffix.

#### domain_keyword

Match domain using keyword.

#### domain_regex

Match domain using regular expression.

#### source_ip_cidr

Match source IP CIDR.

#### ip_cidr

!!! info ""

    `ip_cidr` is an alias for `source_ip_cidr` when the Rule Set is used in DNS rules or `rule_set_ipcidr_match_source` enabled in route rules.

Match IP CIDR.

#### source_port

Match source port.

#### source_port_range

Match source port range.

#### port

Match port.

#### port_range

Match port range.

#### process_name

!!! quote ""

    Only supported on Linux, Windows, and macOS.

Match process name.

#### process_path

!!! quote ""

    Only supported on Linux, Windows, and macOS.

Match process path.

#### package_name

Match android package name.

#### wifi_ssid

!!! quote ""

    Only supported in graphical clients on Android and iOS.

Match WiFi SSID.

#### wifi_bssid

!!! quote ""

    Only supported in graphical clients on Android and iOS.

Match WiFi BSSID.

#### invert

Invert match result.

### Logical Fields

#### type

`logical`

#### mode

==Required==

`and` or `or`

#### rules

==Required==

Included rules.
docs/configuration/rule-set/index.md (new file, 97 lines)
@@ -0,0 +1,97 @@
---
icon: material/new-box
---

# Rule Set

!!! question "Since sing-box 1.8.0"

### Structure

```json
{
  "type": "",
  "tag": "",
  "format": "",

  ... // Typed Fields
}
```

#### Local Structure

```json
{
  "type": "local",

  ...

  "path": ""
}
```

#### Remote Structure

!!! info ""

    Remote rule-set will be cached if `experimental.cache_file.enabled`.

```json
{
  "type": "remote",

  ...,

  "url": "",
  "download_detour": "",
  "update_interval": ""
}
```

### Fields

#### type

==Required==

Type of Rule Set, `local` or `remote`.

#### tag

==Required==

Tag of Rule Set.

#### format

==Required==

Format of Rule Set, `source` or `binary`.

### Local Fields

#### path

==Required==

File path of Rule Set.

### Remote Fields

#### url

==Required==

Download URL of Rule Set.

#### download_detour

Tag of the outbound to download rule-set.

Default outbound will be used if empty.

#### update_interval

Update interval of Rule Set.

`1d` will be used if empty.
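For reference alongside the fields above, a sketch showing one `local` and one `remote` rule-set entry in the route configuration; the tags, file path, and URL are placeholders chosen for illustration.

```json
{
  "route": {
    "rule_set": [
      {
        "type": "local",
        "tag": "rules-private",
        "format": "binary",
        "path": "/etc/sing-box/rules-private.srs"
      },
      {
        "type": "remote",
        "tag": "rules-ads",
        "format": "source",
        "url": "https://example.org/rules-ads.json",
        "download_detour": "direct",
        "update_interval": "1d"
      }
    ]
  }
}
```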
docs/configuration/rule-set/source-format.md (new file, 34 lines)
@@ -0,0 +1,34 @@
---
icon: material/new-box
---

# Source Format

!!! question "Since sing-box 1.8.0"

### Structure

```json
{
  "version": 1,
  "rules": []
}
```

### Compile

Use `sing-box rule-set compile [--output <file-name>.srs] <file-name>.json` to compile source to binary rule-set.

### Fields

#### version

==Required==

Version of Rule Set, must be `1`.

#### rules

==Required==

List of [Headless Rule](./headless-rule.md).
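As a worked example of the source format, a hypothetical `my-rules.json` containing a single headless rule; the file name and values are placeholders. Compiling it with the `sing-box rule-set compile` command shown above would produce a binary `my-rules.srs`.

```json
{
  "version": 1,
  "rules": [
    {
      "domain_suffix": [
        ".example.org"
      ],
      "ip_cidr": [
        "10.0.0.0/24"
      ]
    }
  ]
}
```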
@@ -199,10 +199,6 @@ The path to the server private key, in PEM format.

==Client only==

-!!! warning ""
-
-    uTLS is not included by default, see [Installation](./#installation).
-
!!! note ""

    uTLS is poorly maintained and the effect may be unproven, use at your own risk.
@@ -226,10 +222,6 @@ Chrome fingerprint will be used if empty.

### ECH Fields

-!!! warning ""
-
-    ECH is not included by default, see [Installation](./#installation).
-
ECH (Encrypted Client Hello) is a TLS extension that allows a client to encrypt the first part of its ClientHello
message.
@@ -278,10 +270,6 @@ If empty, load from DNS will be attempted.

### ACME Fields

-!!! warning ""
-
-    ACME is not included by default, see [Installation](./#installation).
-
#### domain

List of domain.
@@ -357,14 +345,6 @@ See [DNS01 Challenge Fields](/configuration/shared/dns01_challenge) for details.

### Reality Fields

-!!! warning ""
-
-    reality server is not included by default, see [Installation](./#installation).
-
-!!! warning ""
-
-    uTLS, which is required by reality client is not included by default, see [Installation](./#installation).
-
#### handshake

==Server only==
@@ -193,10 +193,6 @@ TLS version values:

==Client only==

-!!! warning ""
-
-    uTLS is not included in the default installation, see [Installation](/zh/#_2).
-
!!! note ""

    uTLS is poorly maintained and its effect may be unproven, use at your own risk.
@@ -220,14 +216,9 @@ uTLS is a fork of "crypto/tls", which provides ClientHello fingerprinting…

## ECH Fields

-!!! warning ""
-
-    ECH is not included in the default installation, see [Installation](/zh/#_2).
-
ECH (Encrypted Client Hello) is a TLS extension that allows a client to encrypt the first part of its ClientHello
message.

ECH configuration and keys can be generated with `sing-box generate ech-keypair [--pq-signature-schemes-enabled]`.

#### pq_signature_schemes_enabled
@@ -273,10 +264,6 @@ ECH PEM configuration path

### ACME Fields

-!!! warning ""
-
-    ACME is not included in the default installation, see [Installation](/zh/#_2).
-
#### domain

List of domains.
@@ -348,14 +335,6 @@ ACME DNS01 challenge fields. If configured, other challenge methods will be disabled.

### Reality Fields

-!!! warning ""
-
-    The reality server is not included in the default installation, see [Installation](/zh/#_2).
-
-!!! warning ""
-
-    uTLS, which is required by the reality client, is not included in the default installation, see [Installation](/zh/#_2).
-
#### handshake

==Server only==
@@ -129,10 +129,6 @@ It needs to be consistent with the server.
}
```

-!!! warning ""
-
-    QUIC is not included by default, see [Installation](./#installation).
-
!!! warning "Difference from v2ray-core"

    No additional encryption support:
@@ -142,7 +138,7 @@ It needs to be consistent with the server.

!!! note ""

-    standard gRPC has good compatibility but poor performance and is not included by default, see [Installation](./#installation).
+    standard gRPC has good compatibility but poor performance and is not included by default, see [Installation](/installation/build-from-source/#build-tags).

```json
{
|
@ -128,10 +128,6 @@ HTTP 请求的额外标头。
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! warning ""
|
|
||||||
|
|
||||||
默认安装不包含 QUIC, 参阅 [安装](/zh/#_2)。
|
|
||||||
|
|
||||||
!!! warning "与 v2ray-core 的区别"
|
!!! warning "与 v2ray-core 的区别"
|
||||||
|
|
||||||
没有额外的加密支持:
|
没有额外的加密支持:
|
||||||
@ -141,7 +137,7 @@ HTTP 请求的额外标头。
|
|||||||
|
|
||||||
!!! note ""
|
!!! note ""
|
||||||
|
|
||||||
默认安装不包含标准 gRPC (兼容性好,但性能较差), 参阅 [安装](/zh/#_2)。
|
默认安装不包含标准 gRPC (兼容性好,但性能较差), 参阅 [安装](/zh/installation/build-from-source/#_5)。
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
|
@ -4,7 +4,37 @@ icon: material/delete-alert
|
|||||||
|
|
||||||
# Deprecated Feature List
|
# Deprecated Feature List
|
||||||
|
|
||||||
### 1.6.0
|
## 1.8.0
|
||||||
|
|
||||||
|
#### Cache file and related features in Clash API
|
||||||
|
|
||||||
|
`cache_file` and related features in Clash API is migrated to independent `cache_file` options,
|
||||||
|
check [Migration](/migration/#migrate-cache-file-from-clash-api-to-independent-options).
|
||||||
|
|
||||||
|
#### GeoIP
|
||||||
|
|
||||||
|
GeoIP is deprecated and may be removed in the future.
|
||||||
|
|
||||||
|
The maxmind GeoIP National Database, as an IP classification database,
|
||||||
|
is not entirely suitable for traffic bypassing,
|
||||||
|
and all existing implementations suffer from high memory usage and difficult management.
|
||||||
|
|
||||||
|
sing-box 1.8.0 introduces [Rule Set](/configuration/rule_set), which can completely replace GeoIP,
|
||||||
|
check [Migration](/migration/#migrate-geoip-to-rule-sets).
|
||||||
|
|
||||||
|
#### Geosite
|
||||||
|
|
||||||
|
Geosite is deprecated and may be removed in the future.
|
||||||
|
|
||||||
|
Geosite, the `domain-list-community` project maintained by V2Ray as an early traffic bypassing solution,
|
||||||
|
suffers from a number of problems, including lack of maintenance, inaccurate rules, and difficult management.
|
||||||
|
|
||||||
|
sing-box 1.8.0 introduces [Rule Set](/configuration/rule_set), which can completely replace Geosite,
|
||||||
|
check [Migration](/migration/#migrate-geosite-to-rule-sets).
|
||||||
|
|
||||||
|
Geosite,即由 V2Ray 维护的 domain-list-community 项目,作为早期流量绕过解决方案,存在着大量问题,包括缺少维护、规则不准确、管理困难。
|
||||||
|
|
||||||
|
## 1.6.0
|
||||||
|
|
||||||
The following features will be marked deprecated in 1.5.0 and removed entirely in 1.6.0.
|
The following features will be marked deprecated in 1.5.0 and removed entirely in 1.6.0.
|
||||||
|
|
||||||
|
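To make the GeoIP deprecation concrete, a minimal before/after sketch of one route rule under the migration described above; the `geoip-cn` rule-set tag and its URL are assumptions, and the authoritative steps are in the linked Migration guide.

A rule as commonly written before 1.8.0:

```json
{
  "route": {
    "rules": [
      {
        "geoip": "cn",
        "outbound": "direct"
      }
    ]
  }
}
```

The same intent expressed with a rule set:

```json
{
  "route": {
    "rule_set": [
      {
        "type": "remote",
        "tag": "geoip-cn",
        "format": "binary",
        "url": "https://example.org/geoip-cn.srs"
      }
    ],
    "rules": [
      {
        "rule_set": [
          "geoip-cn"
        ],
        "outbound": "direct"
      }
    ]
  }
}
```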
@@ -4,7 +4,34 @@ icon: material/delete-alert

# Deprecated Feature List

-### 1.6.0
+## 1.8.0
+
+#### Cache file and related features in Clash API
+
+`cache_file` and related features in Clash API are deprecated and have been migrated to the independent `cache_file` options,
+see [Migration](/zh/migration/#clash-api).
+
+#### GeoIP
+
+GeoIP is deprecated and may be removed in the near future.
+
+The maxmind GeoIP national database, as an IP classification database, is not entirely suitable for traffic bypassing,
+and all existing implementations suffer from high memory usage and difficult management.
+
+sing-box 1.8.0 introduces [Rule Set](/configuration/rule_set),
+which can completely replace GeoIP, see [Migration](/zh/migration/#geoip).
+
+#### Geosite
+
+Geosite is deprecated and may be removed in the near future.
+
+Geosite, the domain-list-community project maintained by V2Ray as an early traffic bypassing solution,
+suffers from a number of problems, including lack of maintenance, inaccurate rules, and difficult management.
+
+sing-box 1.8.0 introduces [Rule Set](/configuration/rule_set),
+which can completely replace Geosite, see [Migration](/zh/migration/#geosite).
+
+## 1.6.0

The following features were marked deprecated in 1.5.0 and removed entirely in 1.6.0.
@@ -23,7 +23,7 @@ You can download and install Go from: https://go.dev/doc/install, latest version
make
```

-Or build and install binary to `GOBIN`:
+Or build and install binary to `$GOBIN`:

```bash
make install
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user