
Scala ConcurrentHashMap Class Code Examples


This article collects typical usage examples of java.util.concurrent.ConcurrentHashMap in Scala. If you have been wondering what the ConcurrentHashMap class is for in Scala, how to use it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the ConcurrentHashMap class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Scala code examples.
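
Before the examples, here is a minimal sketch of the pattern almost all of them share: create a ConcurrentHashMap, use its atomic operations directly, or wrap it as a Scala scala.collection.concurrent.Map via JavaConverters. The object and key names below are illustrative, not taken from any of the projects that follow.

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConverters._
import scala.collection.concurrent

object QuickStart extends App {
  // Use the Java API directly; putIfAbsent and compute are atomic.
  val direct = new ConcurrentHashMap[String, Int]()
  direct.putIfAbsent("requests", 0)
  direct.compute("requests", (_, v) => v + 1) // lambda needs Scala 2.12+ SAM conversion

  // Or wrap it as an idiomatic Scala concurrent.Map.
  val wrapped: concurrent.Map[String, Int] =
    new ConcurrentHashMap[String, Int]().asScala
  wrapped.putIfAbsent("hits", 1)

  println(s"requests=${direct.get("requests")} hits=${wrapped.get("hits")}")
}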

Example 1:

// Package declaration and imported dependencies
package com.shashank.akkahttp.project

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import com.shashank.akkahttp.project.Models.{LoadRequest, ServiceJsonProtoocol}
import spray.json.JsArray

import scala.collection.JavaConverters._
import spray.json.{DefaultJsonProtocol, JsArray, pimpAny}
import spray.json.DefaultJsonProtocol._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql._


trait RestService {
  implicit val system: ActorSystem
  implicit val materializer: ActorMaterializer
  implicit val sparkSession: SparkSession
  val datasetMap = new ConcurrentHashMap[String, Dataset[Row]]()

  import ServiceJsonProtoocol._

  val route =
    pathSingleSlash {
      get {
        complete {
          "welcome to rest service"
        }
      }
    } ~
      path("load") {
        post {
          entity(as[LoadRequest]) {
            loadRequest => complete {
              val id = "" + System.nanoTime()
              val dataset = sparkSession.read.format("csv")
                .option("header", "true")
                .load(loadRequest.path)
              datasetMap.put(id, dataset)
              id
            }
          }
        }
      } ~
      path("view" / """[\w[0-9]-_]+""".r) { id =>
        get {
          complete {
            val dataset = datasetMap.get(id)
            dataset.take(10).map(row => row.toString())
          }
        }
      }
} 
Author: shashankgowdal, Project: introduction-to-akkahttp, Lines: 58, Source: RestService.scala


Example 2: RemoteMetricsOn

// Package declaration and imported dependencies
package akka.remote

import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec
import akka.actor.ActorSelectionMessage
import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Extension
import akka.actor.ExtensionId
import akka.actor.ExtensionIdProvider
import akka.event.Logging
import akka.routing.RouterEnvelope


private[akka] class RemoteMetricsOn(system: ExtendedActorSystem) extends RemoteMetrics {

  private val logFrameSizeExceeding: Int = system.settings.config.getBytes(
    "akka.remote.log-frame-size-exceeding").toInt
  private val log = Logging(system, this.getClass)
  private val maxPayloadBytes: ConcurrentHashMap[Class[_], Integer] = new ConcurrentHashMap

  override def logPayloadBytes(msg: Any, payloadBytes: Int): Unit =
    if (payloadBytes >= logFrameSizeExceeding) {
      val clazz = msg match {
        case x: ActorSelectionMessage ⇒ x.msg.getClass
        case x: RouterEnvelope        ⇒ x.message.getClass
        case _                        ⇒ msg.getClass
      }

      // 10% threshold until next log
      def newMax = (payloadBytes * 1.1).toInt

      @tailrec def check(): Unit = {
        val max = maxPayloadBytes.get(clazz)
        if (max eq null) {
          if (maxPayloadBytes.putIfAbsent(clazz, newMax) eq null)
            log.info("Payload size for [{}] is [{}] bytes", clazz.getName, payloadBytes)
          else check()
        } else if (payloadBytes > max) {
          if (maxPayloadBytes.replace(clazz, max, newMax))
            log.info("New maximum payload size for [{}] is [{}] bytes", clazz.getName, payloadBytes)
          else check()
        }
      }
      check()
    }
} 
Author: love1314sea, Project: akka-2.3.16, Lines: 48, Source: RemoteMetricsExtension.scala
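
The check() loop in Example 2 is the classic lock-free retry idiom: read the current value, attempt an atomic putIfAbsent or replace, and start over if another thread won the race. A stripped-down sketch of the same idiom (MaxTracker and record are illustrative names, not from the Akka source):

import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec

object MaxTracker {
  private val maxSeen = new ConcurrentHashMap[String, Integer]

  // Record `value` only if it exceeds the current maximum for `key`.
  @tailrec def record(key: String, value: Int): Unit = {
    val current = maxSeen.get(key)
    if (current eq null) {
      // Another thread may insert first; retry if putIfAbsent loses the race.
      if (maxSeen.putIfAbsent(key, value) ne null) record(key, value)
    } else if (value > current) {
      // replace() is a compare-and-swap; retry if the value moved under us.
      if (!maxSeen.replace(key, current, value)) record(key, value)
    }
  }
}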


Example 3: State

// Package declaration and imported dependencies
package com.github.btesila.weather.monitor.state.mgmt

import java.util.concurrent.ConcurrentHashMap

import com.github.btesila.weather.monitor.model.{ActiveLocationRecord, CurrentWeatherConditions, Location}
import org.joda.time._

import scala.collection._
import scala.collection.convert.decorateAsScala._


object State {

  val activeLocationRecords: concurrent.Map[String, ActiveLocationRecord] =
    new ConcurrentHashMap[String, ActiveLocationRecord]().asScala

  def recordIdFor(location: Location): String = recordIdFor(location.city.toLowerCase, location.country.toLowerCase)

  def recordIdFor(city: String, country: String): String = s"${city.toLowerCase}-${country.toLowerCase}"

  def findActiveLocation(city: String, country: String): Option[ActiveLocationRecord] = {
    val id = recordIdFor(city, country)
    activeLocationRecords.get(id) match {
      case Some(record) => {
        activeLocationRecords.put(id, record.copy(lastTriggered = DateTime.now))
        Some(record)
      }
      case _ => None
    }
  }

  def addLocationRecord(location: Location, crtWeatherConditions: CurrentWeatherConditions): Unit = {
    val id = recordIdFor(location)
    activeLocationRecords.put(id, ActiveLocationRecord(DateTime.now, location, crtWeatherConditions))
  }

  def updateLocationRecord(id: String, crtConditions: CurrentWeatherConditions): Unit =
    activeLocationRecords.get(id).foreach { record =>
      activeLocationRecords.put(id, record.copy(crtConditions = crtConditions))
    }

  def removeLocationRecord(entry: (String, ActiveLocationRecord)): Unit = {
    val (id, activeLocation) = entry
    activeLocationRecords.remove(id, activeLocation)
  }
} 
Author: Bii03, Project: weather-monitor, Lines: 47, Source: State.scala


Example 4: Session

// Package declaration and imported dependencies
package com.spooky.bittorrent.l.session

import com.spooky.bittorrent.l.file.TorrentFileManager
import com.spooky.bittorrent.model.PeerId
import com.spooky.bittorrent.InfoHash
import java.util.concurrent.ConcurrentHashMap
import com.spooky.bittorrent.l.session.client.ClientSession
import java.util.function.Function
import com.spooky.cipher.MSEKeyPair
import com.spooky.bittorrent.model.TorrentStatistics
import spooky.actor.ActorRef

class Session(val fileManager: TorrentFileManager, val peerId: PeerId, val announce: ActorRef) extends ViewableSession {
  private val torrent = fileManager.torrent
  private val infoHash: InfoHash = fileManager.torrent.infoHash
  private val clients = new ConcurrentHashMap[PeerId, ClientSession]
  @volatile private var activeListeners: Int = 0
  @volatile private var activeClients: Int = 0



  def statistics: TorrentStatistics = {
    TorrentStatistics(torrent.infoHash, torrent.info.length, torrent.info.length, 0, 0)
  }

  def init(peerId: PeerId, keyPair: MSEKeyPair): ClientSession = { // not used
    // put returns the previous mapping (null on first insert), so return
    // the new session explicitly rather than the result of put.
    val session = new ClientSession(peerId, keyPair)
    clients.put(peerId, session)
    session
  }
  //announce peers

} 
Author: zpooky, Project: bittorrent, Lines: 32, Source: Session.scala


Example 5: Sessions

// Package declaration and imported dependencies
package com.spooky.bittorrent.l.session

import scala.collection.JavaConversions._
import com.spooky.bittorrent.model.PeerId
import com.spooky.bittorrent.l.file.TorrentFileManager
import java.util.concurrent.ConcurrentHashMap
import com.spooky.bittorrent.model.TorrentSetup
import com.spooky.bittorrent.Checksum
import com.spooky.bittorrent.InfoHash
import com.spooky.bittorrent.model.EnrichedTorrentSetup
import spooky.actor.ActorSystem
import com.spooky.bittorrent.actors.AnnounceActor

class Sessions(actorSystem: ActorSystem) {
  private val sessions = new ConcurrentHashMap[InfoHash, Session]
  def get(infoHash: InfoHash): Option[Session] = Option(sessions.get(infoHash))
  def register(setup: EnrichedTorrentSetup): Session = setup match {
    case EnrichedTorrentSetup(torrent, root, state) => {
      val session = get(torrent.infoHash)
      session.getOrElse({
        val fileManager = TorrentFileManager(torrent, root, state)
        val peerId = PeerId.create
        val session = new Session(fileManager, peerId, actorSystem.actorOf(AnnounceActor.props))
        sessions.put(torrent.infoHash, session)
        session
      })
    }
  }
  def infoHashes: List[InfoHash] = sessions.keys().toList
} 
Author: zpooky, Project: bittorrent, Lines: 31, Source: Sessions.scala


Example 6: TcpWriteActor

// Package declaration and imported dependencies
package spooky.io

import java.nio.channels.SocketChannel
import spooky.actor.Props
import spooky.actor.Actor
import java.nio.ByteBuffer
import spooky.actor.ActorRef
import spooky.io.TcpThread._
import java.util.concurrent.ConcurrentHashMap

object TcpWriteActor {
  def props(channel: SocketChannel, actors: ConcurrentHashMap[Tcp.Address, Tuple2[MessageActorRef, WriteActorRef]]): Props = Props(classOf[TcpWriteActor], channel, actors)
}
private[io] class TcpWriteActor(private val channel: SocketChannel, actors: ConcurrentHashMap[Tcp.Address, Tuple2[MessageActorRef, WriteActorRef]]) extends Actor {

  def receive: PartialFunction[Any, Unit] = {
    case Tcp.Register(messageActor, address, _, _) => {
      val previous = actors.put(address, (messageActor, self))
      assert(previous == null)
      context.become(traffic())
    }

  }
  private def traffic(): PartialFunction[Any, Unit] = {
//    case Tcp.Close => {
//      sender() ! Tcp.Closed
//    }
    case Tcp.Write(data, Tcp.NoAck) => {
      write(data.toByteBuffer)
    }
    case Tcp.Write(data, ack) => {
      if (write(data.toByteBuffer)) {
        sender ! ack
      }
    }
  }

  private def write(bb: ByteBuffer): Boolean = {
    if (!channel.isConnected) {
      Thread.currentThread.interrupt()
      false
    } else {
      while (bb.hasRemaining()) {
        channel.write(bb)
      }
      true
    }
  }
} 
Author: zpooky, Project: bittorrent, Lines: 50, Source: TcpWriteActor.scala


Example 7: ExampleForeachHeader

// Package declaration and imported dependencies
package ppl.delite.runtime.codegen.kernels.scala.examples

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.locks.ReentrantLock



object ExampleForeachHeader {
  def apply(in0: Array[Int], in1: Array[Double], in2: Double) = new ExampleForeachHeader(in0, in1, in2)
}

final class ExampleForeachHeader(in0: Array[Int], in1: Array[Double], in2: Double) {

  //this is the apply method of another (kernel) object: shouldn't be generated
  def kernel_apply(in0: Array[Int], in1: Array[Double], in2: Double): Foreach = {
    new Foreach {
      def in = in0
      def foreach(elem: Int) { in1(elem) = (in1(elem-1) + in1(elem) + in1(elem+1))/3 }
      def sync(idx: Int) = List(in0(idx-1), in0(idx), in0(idx+1))
    }
  }

  abstract class Foreach {
    def in: Array[Int]
    def foreach(elem: Int)
    def sync(idx: Int): List[Any]
  }

  val closure = kernel_apply(in0, in1, in2)
  val lockMap = new ConcurrentHashMap[Any, ReentrantLock]
}

object ExampleForeach {

  def apply(foreach: ExampleForeachHeader) {
    val in = foreach.closure.in
    val size = in.size
    var i = size*2/4 //size*chunkIdx/numChunks
    val end = size*3/4 //size*(chunkIdx+1)/numChunks

    while (i < end) {
      val sync = foreach.closure.sync(i).sortBy(System.identityHashCode(_)) //TODO: optimize locking mechanism
      for (e <- sync) {
        foreach.lockMap.putIfAbsent(e, new ReentrantLock)
        foreach.lockMap.get(e).lock
      }

      foreach.closure.foreach(in(i))

      for (e <- sync.reverse) {
        foreach.lockMap.get(e).unlock
      }

      i += 1
    }
  }
} 
Author: leratojeffrey, Project: OptiSDR-Compiler, Lines: 58, Source: ExampleForeach.scala
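
Example 7 avoids deadlock by always acquiring the per-element locks in one global order (System.identityHashCode order) and releasing them in reverse. A stripped-down sketch of the same idiom; OrderedLocking and withLocks are illustrative names, not from the OptiSDR source:

import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.locks.ReentrantLock

object OrderedLocking {
  private val lockMap = new ConcurrentHashMap[Any, ReentrantLock]

  // Lock all keys in identityHashCode order so two threads locking
  // overlapping key sets can never wait on each other in a cycle.
  def withLocks[T](keys: Seq[Any])(body: => T): T = {
    val ordered = keys.distinct.sortBy(System.identityHashCode(_))
    ordered.foreach { k =>
      lockMap.computeIfAbsent(k, _ => new ReentrantLock()).lock()
    }
    try body
    finally ordered.reverse.foreach(k => lockMap.get(k).unlock())
  }
}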


Example 8: DispatcherThreadFactory

// Package declaration and imported dependencies
package knot.core.dispatch

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ConcurrentHashMap, ThreadFactory}

object DispatcherThreadFactory {
  private val threadNumbers = new ConcurrentHashMap[String, AtomicInteger]()

  def getNewThreadNumber(prefix: String): Int = {
    threadNumbers.computeIfAbsent(prefix, _ => new AtomicInteger(1)).getAndIncrement()
  }
}

class DispatcherThreadFactory(val prefix: String) extends ThreadFactory {

  import DispatcherThreadFactory._

  override def newThread(r: Runnable): Thread = {

    val t = new Thread(r, s"knot-$prefix-${getNewThreadNumber(prefix)}")
    t.setDaemon(true)
    t.setPriority(Thread.NORM_PRIORITY)
    t
  }
} 
Author: defvar, Project: knot, Lines: 26, Source: DispatcherThreadFactory.scala


Example 9: get

// Package declaration and imported dependencies
package knot.core.logging

import java.util.concurrent.ConcurrentHashMap

import knot.core.config.{Configs, LogConfig}
import knot.core.util.Utils.Implicits._

import scala.reflect.ClassTag

trait LoggerFactory {
  def get[T: ClassTag](): Logger

  def get[T: ClassTag](config: LogConfig): Logger

  def get(clazz: Class[_]): Logger

  def get(clazz: Class[_], config: LogConfig): Logger
}

class DefaultLoggerFactory extends LoggerFactory {
  val loggerCache = new ConcurrentHashMap[String, Logger]()

  override def get[T: ClassTag](): Logger = {
    get(Configs.log)
  }

  override def get(clazz: Class[_]): Logger = {
    get(clazz, Configs.log)
  }

  override def get[T: ClassTag](config: LogConfig): Logger = {
    val c = implicitly[ClassTag[T]]
    get(c.runtimeClass, config)
  }

  override def get(clazz: Class[_], config: LogConfig): Logger = {
    Option(clazz) match {
      case Some(c) => loggerCache.computeIfAbsent(c.getName, x => StdOutLogger(x, config.logLevel.toIntOrDefault))
      case None => StdOutLogger("unknown")
    }
  }
} 
Author: defvar, Project: knot, Lines: 43, Source: LoggerFactory.scala
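
Examples 8 and 9 pass Scala lambdas straight to computeIfAbsent, which relies on SAM conversion and therefore requires Scala 2.12 or later. On 2.11 you would wrap the factory in an explicit java.util.function.Function; a minimal sketch with illustrative names:

import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}

object CacheFor211 {
  private val cache = new ConcurrentHashMap[String, String]()

  // Scala 2.11-compatible adapter: an explicit Function instance
  // instead of a lambda, since 2.11 has no SAM conversion.
  private val toUpper: JFunction[String, String] =
    new JFunction[String, String] {
      def apply(key: String): String = key.toUpperCase
    }

  def get(key: String): String = cache.computeIfAbsent(key, toUpper)
}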


Example 10: ScheduleActor

// Package declaration and imported dependencies
package com.actorsys.crawler


import java.util.concurrent.ConcurrentHashMap

import akka.actor.{Actor, ActorLogging, ActorRef}
import com.actorsys.service.{ScheduledWebUrl, WebUrl}


class ScheduleActor extends Actor with ActorLogging {
  val config = Map[String, Any](
    "domain.black.list" -> Seq("google.com", "facebook.com", "twitter.com"),
    "crawl.retry.times" -> 3,
    "filter.page.url.suffixes" -> Seq(".zip", ".avi", ".mkv", ",mp4")
  )
  val counter = new ConcurrentHashMap[String, Int]()

  override def receive: Receive = {
    case WebUrl(url) => sender ! ScheduledWebUrl(url, config)
    case (link: String, count: Int) => {
      counter.put(link, count)
      log.info("counter:" + counter.toString)
    }
  }
}

object ScheduleActor {
  def sendFeeds(crawlerActorRef: ActorRef, seeds: Seq[String]) = {
    seeds.foreach(crawlerActorRef ! _)
  }
} 
Author: yuanyedc, Project: actorsys, Lines: 32, Source: ScheduleActor.scala


Example 11: ProjectQueryHandler

// Package declaration and imported dependencies
package fairshare.backend.project

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import fairshare.backend.eventsourcing.Fact
import fairshare.backend.eventsourcing.journals.PollingJournalReader

import scalaz.concurrent.Task
import scalaz.stream.sink
import scala.collection.JavaConverters._

class ProjectQueryHandler(reader: PollingJournalReader[ProjectEvent]) {
  val projectList = new ConcurrentHashMap[ProjectId, String]()

  private def updateProjectList(fact: Fact[ProjectEvent]): Unit = {
    val projectId = ProjectId(UUID.fromString(fact.subject.key))

    val projectName = fact.event match {
      case ProjectEvent.Created(_, name) => Some(name)
      case ProjectEvent.NameModified(name) => Some(name)
      case _ => None
    }

    for {
      name <- projectName
    } projectList.put(projectId, name)
  }

  val listUpdater = reader.readUpdates.to(
    sink.lift[Task, Fact[ProjectEvent]] {
      fact =>
        Task.delay {
          updateProjectList(fact)
        }
    }
  )

  listUpdater.run.runAsync(_ => ())

  def getAll: Map[ProjectId, String] = projectList.asScala.toMap

  def getById(projectId: ProjectId): Option[String] = projectList.asScala.get(projectId)
} 
Author: artempyanykh, Project: fair-share, Lines: 45, Source: ProjectQueryHandler.scala


Example 12: ConnectionPool

// Package declaration and imported dependencies
package com.redislabs.provider.redis

import redis.clients.jedis.{JedisPoolConfig, Jedis, JedisPool}
import redis.clients.jedis.exceptions.JedisConnectionException

import java.util.concurrent.ConcurrentHashMap

import scala.collection.JavaConversions._


object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()
  def connect(re: RedisEndpoint): Jedis = {
    val pool = pools.getOrElseUpdate(re,
      {
        val poolConfig: JedisPoolConfig = new JedisPoolConfig()
        poolConfig.setMaxTotal(250)
        poolConfig.setMaxIdle(32)
        poolConfig.setTestOnBorrow(false)
        poolConfig.setTestOnReturn(false)
        poolConfig.setTestWhileIdle(false)
        poolConfig.setMinEvictableIdleTimeMillis(60000)
        poolConfig.setTimeBetweenEvictionRunsMillis(30000)
        poolConfig.setNumTestsPerEvictionRun(-1)
        new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
      }
    )
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      }
      catch {
        case e: JedisConnectionException if e.getCause.toString.
          contains("ERR max number of clients reached") => {
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        }
        case e: Exception => throw e
      }
    }
    conn
  }
} 
Author: wuzhongdehua, Project: spark_redis, Lines: 47, Source: ConnectionPool.scala
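
A caveat on Example 12: getOrElseUpdate comes from the implicit JavaConversions wrapper, and in the 2.11/2.12-era collections it is a plain get-then-put, so two threads racing on the same RedisEndpoint can each build a JedisPool, one of which then leaks. Java 8's computeIfAbsent runs the factory at most once per key, atomically. A minimal sketch of that alternative, with an illustrative Pool stand-in rather than the real Jedis API:

import java.util.concurrent.ConcurrentHashMap

// Illustrative stand-in for JedisPool.
final class Pool(val endpoint: String)

object AtomicPoolCache {
  private val pools = new ConcurrentHashMap[String, Pool]()

  // computeIfAbsent evaluates the factory at most once per key,
  // unlike the wrapper's getOrElseUpdate, which is check-then-act.
  def connect(endpoint: String): Pool =
    pools.computeIfAbsent(endpoint, e => new Pool(e))
}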


Example 13: TimedFlowOps

// Package declaration and imported dependencies
package com.flipkart.connekt.busybees.streams.flows.profilers

import java.util.concurrent.{ConcurrentHashMap, TimeUnit}

import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL}
import akka.stream.{BidiShape, FlowShape}
import com.flipkart.connekt.busybees.models.RequestTracker
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.utils.StringUtils._

import scala.collection.JavaConverters._
import scala.util.Try

object TimedFlowOps {

  implicit class TimedFlow[I, O, T <: RequestTracker, M](dispatchFlow: Flow[(I, T), (Try[O], T), M]) extends Instrumented {

    val startTimes = new ConcurrentHashMap[T, Long]().asScala

    private def profilingShape(apiName: String) = BidiFlow.fromGraph(GraphDSL.create() { implicit b =>

      val out = b.add(Flow[(I, T)].map {
        case (request, requestTracker) =>
          startTimes.put(requestTracker, System.currentTimeMillis())
          (request, requestTracker)
      })

      val in = b.add(Flow[(Try[O], T)].map {
        case (response, httpRequestTracker) =>
          startTimes.get(httpRequestTracker).map(start => {
            startTimes.remove(httpRequestTracker)
            val duration = System.currentTimeMillis() - start
            ConnektLogger(LogFile.PROCESSORS).trace(s"TimedFlowOps/$apiName MessageId: ${httpRequestTracker.messageId} took : $duration ms")
            duration
          }).foreach(registry.timer(getMetricName(apiName + Option(httpRequestTracker.provider).map("." + _).orEmpty)).update(_, TimeUnit.MILLISECONDS))

          (response, httpRequestTracker)
      })

      BidiShape.fromFlows(out, in)
    })

    def timedAs(apiName: String) = Flow.fromGraph(GraphDSL.create() { implicit b =>
      val s = b.add(profilingShape(apiName))
      val p = b.add(dispatchFlow)

      s.out1 ~> p ~> s.in2

      FlowShape(s.in1, s.out2)
    })
  }

} 
Author: ayush03agarwal, Project: connekt, Lines: 56, Source: TimedFlowOps.scala


Example 14: createNewExecutionContext

// Package declaration and imported dependencies
package com.smule.smg

import java.util.concurrent.{Executors, ConcurrentHashMap}

import scala.collection.concurrent
import scala.collection.JavaConversions._

import play.libs.Akka

import scala.concurrent.ExecutionContext


// NOTE: the enclosing object declaration was stripped by the example
// extractor; restored here from the source file name (ExecutionContexts.scala).
object ExecutionContexts {

  val rrdGraphCtx: ExecutionContext = Akka.system.dispatchers.lookup("akka-contexts.rrd-graph")

  val monitorCtx: ExecutionContext = Akka.system.dispatchers.lookup("akka-contexts.monitor")

  private val log = SMGLogger

  private val ctxMap: concurrent.Map[Int,ExecutionContext] = new ConcurrentHashMap[Int, ExecutionContext]()

  private def createNewExecutionContext(maxThreads: Int): ExecutionContext = {
    val es = Executors.newFixedThreadPool(maxThreads)
    ExecutionContext.fromExecutorService(es)
  }

  def ctxForInterval(interval: Int): ExecutionContext = {
    ctxMap(interval)
  }

  def initializeUpdateContexts(intervals: Seq[Int], threadsPerIntervalMap: Map[Int,Int], defaultThreadsPerInterval: Int): Unit = {
    intervals.foreach { interval =>
      if (!ctxMap.contains(interval)) {
        val maxThreads = if (threadsPerIntervalMap.contains(interval)) threadsPerIntervalMap(interval) else defaultThreadsPerInterval
        val ec = createNewExecutionContext(maxThreads)
        ctxMap(interval) = ec
        log.info("ExecutionContexts.initializeUpdateContexts: Created ExecutionContext for interval=" + interval +
          " maxThreads=" + maxThreads + " ec.class="+ ec.getClass.getName)
      }
    }
  }
} 
Author: asen, Project: smg, Lines: 42, Source: ExecutionContexts.scala


Example 15: PriorKnowledgeTransporter

// Package declaration and imported dependencies
package com.twitter.finagle.http2.transport

import com.twitter.cache.FutureCache
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.http2.Http2Transporter
import com.twitter.finagle.http2.transport.Http2ClientDowngrader.StreamMessage
import com.twitter.finagle.transport.Transport
import com.twitter.util.Future
import java.net.SocketAddress
import java.util.concurrent.ConcurrentHashMap


private[http2] class PriorKnowledgeTransporter(
    underlying: Transporter[Any, Any])
  extends Transporter[Any, Any] {

  private[this] val cache = new ConcurrentHashMap[SocketAddress, Future[MultiplexedTransporter]]()

  private[this] val fn: SocketAddress => Future[MultiplexedTransporter] = { addr: SocketAddress =>
    underlying(addr).map { transport =>
      val multi = new MultiplexedTransporter(
        Transport.cast[StreamMessage, StreamMessage](transport),
        addr
      )
      multi.onClose.ensure {
        cache.remove(addr, multi)
      }
      multi
    }
  }

  private[this] val cachedFn = FutureCache.fromMap(fn, cache)

  def apply(addr: SocketAddress): Future[Transport[Any, Any]] =
    cachedFn(addr).map { multi => Http2Transporter.unsafeCast(multi()) }
} 
Author: wenkeyang, Project: finagle, Lines: 37, Source: PriorKnowledgeTransporter.scala


Example 16: TracerCache

// Package declaration and imported dependencies
package com.twitter.finagle.zipkin.core

import collection.JavaConverters._
import com.twitter.conversions.time._
import com.twitter.util.{TimeoutException, Future, Await}
import java.util.concurrent.ConcurrentHashMap


private[zipkin] class TracerCache[T <: RawZipkinTracer] {
  // to make sure we only create one instance of the tracer per host, port & category
  private[this] val map = new ConcurrentHashMap[String, T].asScala

  def getOrElseUpdate(key: String, mk: => T): T =
    synchronized(map.getOrElseUpdate(key, mk))

  // Try to flush the tracers when we shut
  // down. We give it 100ms.
  Runtime.getRuntime.addShutdownHook(new Thread {
    setName("RawZipkinTracer-ShutdownHook")
    override def run() {
      val tracers = synchronized(map.values.toSeq)
      val joined = Future.join(tracers map(_.flush()))
      try {
        Await.result(joined, 100.milliseconds)
      } catch {
        case _: TimeoutException =>
          System.err.println("Failed to flush all traces before quitting")
      }
    }
  })
} 
Author: wenkeyang, Project: finagle, Lines: 32, Source: TracerCache.scala


Example 17: resolve

// Package declaration and imported dependencies
package indi.lewis.spider.http

import java.net.InetAddress
import java.util.concurrent.ConcurrentHashMap



// NOTE: the class declaration was stripped by the example extractor; restored
// here. The resolve() signature matches Apache HttpClient's
// org.apache.http.conn.DnsResolver, which it presumably overrides (an assumption).
class UserDnsResolver extends org.apache.http.conn.DnsResolver {

  private val dnsMap = new ConcurrentHashMap[String, Array[InetAddress]]


  // Double-checked caching: consult the map first, and only resolve
  // and cache under the lock if the host has not been seen yet.
  override def resolve(host: String): Array[InetAddress] = {
    var ret = dnsMap.get(host)
    if (ret == null) {
      synchronized {
        ret = dnsMap.get(host)
        if (ret == null) {
          ret = InetAddress.getAllByName(host)
          if (ret != null && ret.length > 0) {
            dnsMap.put(host, ret)
          }
        }
      }
    }
    ret
  }
}

object UserDnsResolver {

  private val userDnsResolver = new UserDnsResolver

  def get(): UserDnsResolver = userDnsResolver
}
Author: TokisakiFun, Project: Katipo, Lines: 34, Source: UserDnsResolver.scala


Example 18: InboundConnectionFilter

// Package declaration and imported dependencies
package com.wavesplatform.network

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger

import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelFuture, ChannelHandlerContext}
import io.netty.handler.ipfilter.AbstractRemoteAddressFilter
import scorex.utils.ScorexLogging

@Sharable
class InboundConnectionFilter(peerDatabase: PeerDatabase, maxInboundConnections: Int, maxConnectionsPerHost: Int)
  extends AbstractRemoteAddressFilter[InetSocketAddress] with ScorexLogging {
  private val inboundConnectionCount = new AtomicInteger(0)
  private val perHostConnectionCount = new ConcurrentHashMap[InetAddress, Int]

  private def dec(remoteAddress: InetAddress) = {
    inboundConnectionCount.decrementAndGet()
    perHostConnectionCount.compute(remoteAddress, (_, cnt) => cnt - 1)
    null.asInstanceOf[ChannelFuture]
  }

  override def accept(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress) = {
    val newTotal = inboundConnectionCount.incrementAndGet()
    val newCountPerHost = perHostConnectionCount.compute(remoteAddress.getAddress, (_, cnt) => Option(cnt).fold(1)(_ + 1))
    val isBlacklisted = peerDatabase.blacklistedHosts.contains(remoteAddress.getAddress)

    log.trace(s"Check inbound connection from $remoteAddress: new inbound total = $newTotal, " +
      s"connections with this host = $newCountPerHost, address ${if (isBlacklisted) "IS" else "is not"} blacklisted")

    newTotal <= maxInboundConnections &&
      newCountPerHost <= maxConnectionsPerHost &&
      !isBlacklisted
  }

  override def channelAccepted(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress) =
    ctx.channel().closeFuture().addListener((_: ChannelFuture) => dec(remoteAddress.getAddress))

  override def channelRejected(ctx: ChannelHandlerContext, remoteAddress: InetSocketAddress) =
    dec(remoteAddress.getAddress)
} 
Author: wavesplatform, Project: Waves, Lines: 43, Source: InboundConnectionFilter.scala


Example 19: NetworkSender

// Package declaration and imported dependencies
package com.wavesplatform.it.util

import java.net.InetSocketAddress
import java.util.concurrent.ConcurrentHashMap

import com.wavesplatform.it.network.client.NetworkClient
import com.wavesplatform.network.{PeerInfo, RawBytes}
import io.netty.channel.Channel
import io.netty.channel.group.DefaultChannelGroup
import io.netty.util.HashedWheelTimer
import io.netty.util.concurrent.GlobalEventExecutor

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._

class NetworkSender(address: InetSocketAddress, chainId: Char, name: String, nonce: Long) {
  private val retryTimer = new HashedWheelTimer()
  val allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE)
  val establishedConnections = new ConcurrentHashMap[Channel, PeerInfo]
  val c = new NetworkClient(chainId, name, nonce, allChannels, establishedConnections)
  c.connect(address)

  def sendByNetwork(messages: RawBytes*): Future[Unit] = {
    retryTimer.retryUntil(Future.successful(establishedConnections.size()), (size: Int) => size == 1, 1.seconds)
      .map { _ =>
        val channel = establishedConnections.asScala.head._1
        messages.foreach(msg => channel.writeAndFlush(msg))
      }
  }

  def close(): Unit = {
    retryTimer.stop()
    c.shutdown()
  }
} 
Author: wavesplatform, Project: Waves, Lines: 40, Source: NetworkSender.scala


Example 20: KafkaSink

// Package declaration and imported dependencies
package com.thenetcircle.event_dispatcher.sink

import java.util.concurrent.ConcurrentHashMap

import akka.NotUsed
import akka.kafka.ProducerMessage
import akka.kafka.ProducerMessage.Message
import akka.kafka.scaladsl.Producer
import akka.stream.scaladsl.{ Flow, Keep }
import com.thenetcircle.event_dispatcher.Event
import com.thenetcircle.event_dispatcher.driver.adapter.KafkaSinkAdapter
import com.thenetcircle.event_dispatcher.driver.extractor.Extractor
import com.thenetcircle.event_dispatcher.driver.{ KafkaKey, KafkaValue }
import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerRecord }

object KafkaSink {

  lazy private val producerList = new ConcurrentHashMap[String, KafkaProducer[KafkaKey, KafkaValue]]

  def apply(
      settings: KafkaSinkSettings
  ): Flow[Event, ProducerMessage.Result[KafkaKey, KafkaValue, NotUsed.type], NotUsed] = {

    val producerName = settings.name
    val producerSettings = settings.producerSettings
    // As originally written, the cache was consulted but never populated, so a
    // new producer was created on every call; computeIfAbsent both fixes that
    // and is atomic under concurrent access.
    val producer: KafkaProducer[KafkaKey, KafkaValue] =
      producerList.computeIfAbsent(producerName, _ => producerSettings.createKafkaProducer())

    val kafkaProducerFlow = Flow[ProducerRecord[KafkaKey, KafkaValue]]
      .map(Message(_, NotUsed))
      .via(Producer.flow(settings.producerSettings, producer))

    Flow[Event]
      .map(Extractor.deExtract)
      .map(KafkaSinkAdapter.unfit)
      .viaMat(kafkaProducerFlow)(Keep.left)

  }
} 
Author: thenetcircle, Project: event-dispatcher, Lines: 43, Source: KafkaSink.scala



Note: the java.util.concurrent.ConcurrentHashMap examples in this article were collected from GitHub, MSDocs, and other code-hosting and documentation platforms, and the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; for distribution and use, consult the corresponding project's license. Please do not repost without permission.

