lease.go 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607
  1. // Copyright 2016 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package clientv3
  15. import (
  16. "context"
  17. "sync"
  18. "time"
  19. pb "go.etcd.io/etcd/api/v3/etcdserverpb"
  20. "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
  21. "go.uber.org/zap"
  22. "google.golang.org/grpc"
  23. "google.golang.org/grpc/metadata"
  24. )
type (
	// LeaseRevokeResponse wraps the protobuf message LeaseRevokeResponse.
	LeaseRevokeResponse pb.LeaseRevokeResponse
	// LeaseID is the ID of a lease issued by the etcd server.
	LeaseID int64
)
// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	// ID is the ID of the granted lease.
	ID LeaseID
	// TTL is the time-to-live of the granted lease, in seconds.
	TTL int64
	// Error is the error message copied from the server response, if any.
	Error string
}
// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	// ID is the lease that was kept alive.
	ID LeaseID
	// TTL is the renewed time-to-live for the lease, in seconds.
	TTL int64
}
// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	// ID is the lease the information was requested for.
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`

	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}
// LeaseStatus represents a lease status.
type LeaseStatus struct {
	// ID is the lease ID.
	ID LeaseID `json:"id"`
	// TODO: TTL int64
}
// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
type LeaseLeasesResponse struct {
	*pb.ResponseHeader
	// Leases holds the status of every lease reported by the server.
	Leases []LeaseStatus `json:"leases"`
}
const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0
	// retryConnWait is how long to wait before retrying a request after an error.
	retryConnWait = 500 * time.Millisecond
)
// LeaseResponseChSize is the size of the buffer used to store unsent lease
// responses on each KeepAlive channel.
// WARNING: DO NOT UPDATE.
// Exported only for testing purposes.
var LeaseResponseChSize = 16
  76. // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
  77. //
  78. // This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
  79. type ErrKeepAliveHalted struct {
  80. Reason error
  81. }
  82. func (e ErrKeepAliveHalted) Error() string {
  83. s := "etcdclient: leases keep alive halted"
  84. if e.Reason != nil {
  85. s += ": " + e.Reason.Error()
  86. }
  87. return s
  88. }
// Lease is the client API for lease operations: granting, revoking,
// inspecting, and keeping leases alive.
type Lease interface {
	// Grant creates a new lease with the given TTL in seconds.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// Leases retrieves all leases.
	Leases(ctx context.Context) (*LeaseLeasesResponse, error)

	// KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
	// to the channel are not consumed promptly the channel may become full. When full, the lease
	// client will continue sending keep alive requests to the etcd server, but will drop responses
	// until there is capacity on the channel to send more responses.
	//
	// If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
	// canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
	// containing the error reason.
	//
	// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
	// alive stream is interrupted in some way the client cannot handle itself;
	// given context "ctx" is canceled or timed out.
	//
	// TODO(v4.0): post errors to last keep alive message before closing
	// (see https://github.com/etcd-io/etcd/pull/7866)
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. The response corresponds to the
	// first message from calling KeepAlive. If the response has a recoverable
	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
	//
	// In most of the cases, Keepalive should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}
// lessor implements the Lease interface. It multiplexes all KeepAlive
// subscriptions over a single keepalive stream managed by recvKeepAliveLoop.
type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	// stream is the currently open keepalive stream; streamCancel cancels it
	// when the stream is replaced by resetRecv.
	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	// stopCtx is canceled by stopCancel when Close is called, stopping the
	// background loops.
	stopCtx    context.Context
	stopCancel context.CancelFunc

	// keepAlives holds the per-lease keepalive state for active KeepAlive calls.
	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once

	callOpts []grpc.CallOption

	lg *zap.Logger
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
	// chs are the subscriber channels; ctxs[i] is the context under which
	// chs[i] was registered, so the two slices must stay index-aligned.
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keep alive channels close if no response
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}
// NewLease creates a Lease backed by the given client. The first keepalive
// deadline is the client's dial timeout plus one second.
func NewLease(c *Client) Lease {
	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
}
  157. func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
  158. l := &lessor{
  159. donec: make(chan struct{}),
  160. keepAlives: make(map[LeaseID]*keepAlive),
  161. remote: remote,
  162. firstKeepAliveTimeout: keepAliveTimeout,
  163. lg: c.lg,
  164. }
  165. if l.firstKeepAliveTimeout == time.Second {
  166. l.firstKeepAliveTimeout = defaultTTL
  167. }
  168. if c != nil {
  169. l.callOpts = c.callOpts
  170. }
  171. reqLeaderCtx := WithRequireLeader(context.Background())
  172. l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
  173. return l
  174. }
  175. func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
  176. r := &pb.LeaseGrantRequest{TTL: ttl}
  177. resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
  178. if err == nil {
  179. gresp := &LeaseGrantResponse{
  180. ResponseHeader: resp.GetHeader(),
  181. ID: LeaseID(resp.ID),
  182. TTL: resp.TTL,
  183. Error: resp.Error,
  184. }
  185. return gresp, nil
  186. }
  187. return nil, toErr(ctx, err)
  188. }
  189. func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
  190. r := &pb.LeaseRevokeRequest{ID: int64(id)}
  191. resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
  192. if err == nil {
  193. return (*LeaseRevokeResponse)(resp), nil
  194. }
  195. return nil, toErr(ctx, err)
  196. }
  197. func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
  198. r := toLeaseTimeToLiveRequest(id, opts...)
  199. resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
  200. if err != nil {
  201. return nil, toErr(ctx, err)
  202. }
  203. gresp := &LeaseTimeToLiveResponse{
  204. ResponseHeader: resp.GetHeader(),
  205. ID: LeaseID(resp.ID),
  206. TTL: resp.TTL,
  207. GrantedTTL: resp.GrantedTTL,
  208. Keys: resp.Keys,
  209. }
  210. return gresp, nil
  211. }
  212. func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
  213. resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
  214. if err == nil {
  215. leases := make([]LeaseStatus, len(resp.Leases))
  216. for i := range resp.Leases {
  217. leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
  218. }
  219. return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
  220. }
  221. return nil, toErr(ctx, err)
  222. }
// KeepAlive attempts to keep the given lease alive forever; see the Lease
// interface for the full contract. The returned channel is buffered with
// LeaseResponseChSize slots and may drop responses when full.
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		// loop already stopped; hand back a closed channel plus the halt reason
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()

	// a nil Done channel means ctx can never be canceled, so no closer needed
	if ctx.Done() != nil {
		go l.keepAliveCtxCloser(ctx, id, ka.donec)
	}
	// lazily start the receive and deadline loops on the first KeepAlive call
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})

	return ch, nil
}
  261. func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
  262. for {
  263. resp, err := l.keepAliveOnce(ctx, id)
  264. if err == nil {
  265. if resp.TTL <= 0 {
  266. err = rpctypes.ErrLeaseNotFound
  267. }
  268. return resp, err
  269. }
  270. if isHaltErr(ctx, err) {
  271. return nil, toErr(ctx, err)
  272. }
  273. }
  274. }
// Close releases all resources the lessor holds and waits for the keepalive
// receive loop (if it ever started) to exit. It always returns nil.
func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if stream goroutines never launched
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}
// keepAliveCtxCloser waits for ctx to be canceled and then closes and
// unregisters the channel that was registered with ctx. It returns without
// action if the keepalive (donec) or the lessor (l.donec) finishes first.
func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove the keepalive entirely when no listeners remain
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}
  310. // closeRequireLeader scans keepAlives for ctxs that have require leader
  311. // and closes the associated channels.
  312. func (l *lessor) closeRequireLeader() {
  313. l.mu.Lock()
  314. defer l.mu.Unlock()
  315. for _, ka := range l.keepAlives {
  316. reqIdxs := 0
  317. // find all required leader channels, close, mark as nil
  318. for i, ctx := range ka.ctxs {
  319. md, ok := metadata.FromOutgoingContext(ctx)
  320. if !ok {
  321. continue
  322. }
  323. ks := md[rpctypes.MetadataRequireLeaderKey]
  324. if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
  325. continue
  326. }
  327. close(ka.chs[i])
  328. ka.chs[i] = nil
  329. reqIdxs++
  330. }
  331. if reqIdxs == 0 {
  332. continue
  333. }
  334. // remove all channels that required a leader from keepalive
  335. newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
  336. newCtxs := make([]context.Context, len(newChs))
  337. newIdx := 0
  338. for i := range ka.chs {
  339. if ka.chs[i] == nil {
  340. continue
  341. }
  342. newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
  343. newIdx++
  344. }
  345. ka.chs, ka.ctxs = newChs, newCtxs
  346. }
  347. }
// keepAliveOnce opens a dedicated keepalive stream, sends a single keepalive
// request for the given lease, and returns the first response. The stream is
// closed on return; a CloseSend failure is surfaced through ferr only when no
// earlier error occurred.
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	defer func() {
		// report CloseSend errors via the named ferr result, but never
		// overwrite an error that was already being returned
		if err := stream.CloseSend(); err != nil {
			if ferr == nil {
				ferr = toErr(ctx, err)
			}
			return
		}
	}()

	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
	if err != nil {
		return nil, toErr(ctx, err)
	}

	resp, rerr := stream.Recv()
	if rerr != nil {
		return nil, toErr(ctx, rerr)
	}

	karesp = &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}
	return karesp, nil
}
// recvKeepAliveLoop receives keepalive responses from the stream, reopening
// it on recoverable errors, until stopCtx is canceled. On exit it records the
// loop error, closes donec, and tears down every registered keepalive.
func (l *lessor) recvKeepAliveLoop() (gerr error) {
	defer func() {
		l.mu.Lock()
		close(l.donec)
		l.loopErr = gerr
		for _, ka := range l.keepAlives {
			ka.close()
		}
		l.keepAlives = make(map[LeaseID]*keepAlive)
		l.mu.Unlock()
	}()

	for {
		stream, err := l.resetRecv()
		if err != nil {
			if canceledByCaller(l.stopCtx, err) {
				return err
			}
		} else {
			for {
				resp, err := stream.Recv()
				if err != nil {
					if canceledByCaller(l.stopCtx, err) {
						return err
					}
					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
						// require-leader subscribers must be failed when
						// the connected member has no leader
						l.closeRequireLeader()
					}
					// recoverable stream error; fall through to retry wait
					break
				}
				l.recvKeepAlive(resp)
			}
		}

		// wait before reopening the stream, unless the lessor is closing
		select {
		case <-time.After(retryConnWait):
		case <-l.stopCtx.Done():
			return l.stopCtx.Err()
		}
	}
}
// resetRecv opens a new lease stream and starts sending keep alive requests.
// Any previously installed stream is canceled before the new one replaces it.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
	sctx, cancel := context.WithCancel(l.stopCtx)
	// NOTE(review): withMax(0) appears to disable call-level retries since
	// reconnects are driven by recvKeepAliveLoop — confirm against withMax.
	stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
	if err != nil {
		cancel()
		return nil, err
	}

	l.mu.Lock()
	defer l.mu.Unlock()
	if l.stream != nil && l.streamCancel != nil {
		l.streamCancel()
	}

	l.streamCancel = cancel
	l.stream = stream

	// one sender goroutine per stream; it exits when the stream context ends
	go l.sendKeepAliveLoop(stream)
	return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[karesp.ID]
	if !ok {
		// no subscribers for this lease; nothing to update
		return
	}

	if karesp.TTL <= 0 {
		// lease expired; close all keep alive channels
		delete(l.keepAlives, karesp.ID)
		ka.close()
		return
	}

	// send update to all channels; schedule the next renewal at one third of
	// the TTL so several attempts fit before the lease would expire
	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
	for _, ch := range ka.chs {
		select {
		case ch <- karesp:
		default:
			// subscriber is not consuming; drop the response rather than block
			if l.lg != nil {
				l.lg.Warn("lease keepalive response queue is full; dropping response send",
					zap.Int("queue-size", len(ch)),
					zap.Int("queue-capacity", cap(ch)),
				)
			}
		}
		// still advance in order to rate-limit keep-alive sends
		ka.nextKeepAlive = nextKeepAlive
	}
}
  472. // deadlineLoop reaps any keep alive channels that have not received a response
  473. // within the lease TTL
  474. func (l *lessor) deadlineLoop() {
  475. for {
  476. select {
  477. case <-time.After(time.Second):
  478. case <-l.donec:
  479. return
  480. }
  481. now := time.Now()
  482. l.mu.Lock()
  483. for id, ka := range l.keepAlives {
  484. if ka.deadline.Before(now) {
  485. // waited too long for response; lease may be expired
  486. ka.close()
  487. delete(l.keepAlives, id)
  488. }
  489. }
  490. l.mu.Unlock()
  491. }
  492. }
// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
// Every retryConnWait it sends one request for each lease whose nextKeepAlive
// time has passed; it exits when the stream, the lessor, or stopCtx ends.
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
	for {
		var tosend []LeaseID

		// snapshot the due lease IDs under the lock, then send without it
		now := time.Now()
		l.mu.Lock()
		for id, ka := range l.keepAlives {
			if ka.nextKeepAlive.Before(now) {
				tosend = append(tosend, id)
			}
		}
		l.mu.Unlock()

		for _, id := range tosend {
			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
			if err := stream.Send(r); err != nil {
				// TODO do something with this error?
				return
			}
		}

		select {
		case <-time.After(retryConnWait):
		case <-stream.Context().Done():
			return
		case <-l.donec:
			return
		case <-l.stopCtx.Done():
			return
		}
	}
}
  523. func (ka *keepAlive) close() {
  524. close(ka.donec)
  525. for _, ch := range ka.chs {
  526. close(ch)
  527. }
  528. }