LCA-GPT/QA/classify.ipynb

80 KiB
Raw Permalink Blame History

In [5]:
import pandas as pd
import os
from langchain_community.llms import QianfanLLMEndpoint,Tongyi
from langchain.chains import LLMChain
from langchain_core.prompts import ChatPromptTemplate
from langchain.prompts import PromptTemplate
In [6]:
api = "xxxxxx"
sk = "xxxxx"

llm = QianfanLLMEndpoint(model="ERNIE-4.0-8K",qianfan_ak=api,qianfan_sk=sk)
In [ ]:
def classify(text):
    prompt = PromptTemplate(
        input_variables=["prompt"],
        template='''下面给出国民经济行业分类及其子类。
        农、林、牧、渔业包括:农业;林业;畜牧业;渔业;
        科学研究和技术服务业包括:研究和试验发展;专业技术服务业;科技推广和应用服务业;
        制造业包括:农副食品加工业;食品制造业;酒、饮料和精制茶制造业;烟草制品业;纺织业;纺织服装、服饰业;皮革、毛皮、羽毛及其制品和制鞋业;木材加工和木、竹、藤、棕、草制品业;家具制造业;造纸和纸制品业;印刷和记录媒介复制业;文教、工美、体育和娱乐用品制造业;石油、煤炭及其他燃料加工业;化学原料和化学制品制造业;医药制造业;化学纤维制造业;橡胶和塑料制品业;非金属矿物制品业;黑色金属冶炼和压延加工业;有色金属冶炼和压延加工业;金属制品业;通用设备制造业;专用设备制造业;汽车制造业; 铁路、船舶、航空航天和其他运输设备制造业;电气机械和器材制造业;计算机、通信和其他电子设备制造业;仪器仪表制造业;废弃资源综合利用业;金属制品、机械和设备修理业;
        水利、环境和公共设施管理业包括:水利管理业、生态保护和环境治理业;公共设施管理业;土地管理业;
        电力、热力、燃气及水生产和供应业包括:电力、热力生产和供应业;燃气生产和供应业;水的生产和供应业;
        建筑业包括:房屋建筑业;土木工程建筑业;建筑安装业;建筑装饰、装修和其他建筑业
        
        输出下面内容属于哪个具体的子类:{prompt}
        答案用#表示,输出格式如下:
        #农副食品加工业#
        '''
    )
    chain = LLMChain(llm = llm,prompt=prompt)
    response = chain.run(
        {"prompt":text}
    )
    print(response)

    return response
In [8]:
query = '''问题什么是生命周期分析LCA的主要目标
答案:生命周期分析旨在评估产品或服务从原材料获取到最终处置的环境影响。'''
classify(query)
/home/zhangxj/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:141: LangChainDeprecationWarning: The class `LLMChain` was deprecated in LangChain 0.1.17 and will be removed in 1.0. Use RunnableSequence, e.g., `prompt | llm` instead.
  warn_deprecated(
/home/zhangxj/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:141: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use invoke instead.
  warn_deprecated(
[INFO][2024-11-21 10:51:06.059] oauth.py:228 [t:139775941743232]: trying to refresh access_token for ak `zLiAbX***`
[INFO][2024-11-21 10:51:06.248] oauth.py:243 [t:139775941743232]: sucessfully refresh access_token
科学研究和技术服务业
Out[8]:
'科学研究和技术服务业'
In [ ]:
import re

def normalize(text):

    clean_text = re.sub(r'[\r\n]+', '', text)
    pattern = r'(?<=#)(.*?)(?=#)'
    matches = re.findall(pattern, clean_text)
    
    return matches[-1]
In [ ]:
def classify_csv(path):
    data = pd.read_csv(path)
    class_list = []
    for idx,item in data.iterrows():
        ques = item['question']
        ans = item['answer']
        query = "问题:"+ques+"\n答案:"+ans
        res = classify(query)
        res = normalize(res)
        class_list.append(res)
        
    return class_list
In [33]:
def save_list(input,path):
    df = pd.DataFrame(input,columns=["类别"])
    df.to_csv(path,index = False)
In [ ]:
 
In [48]:
first_path = "/home/zhangxj/WorkFile/LCA-GPT/QA/split"
filenames = os.listdir(first_path)

for file in filenames:
    if file == "output_1.csv":
        continue
    file_path = os.path.join(first_path, file)
    res = classify_csv(file_path)
    clean_res = normalize(res)

    path = "/home/zhangxj/WorkFile/LCA-GPT/QA/classify"+file
    save_list(res,path)
#科学研究和技术服务业#
#科学研究和技术服务业#
#科学研究和技术服务业#
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
Cell In[48], line 8
      6     continue
      7 file_path = os.path.join(first_path, file)
----> 8 res = classify_csv(file_path)
      9 clean_res = normalize(res)
     11 path = "/home/zhangxj/WorkFile/LCA-GPT/QA/classify"+file

Cell In[32], line 8, in classify_csv(path)
      6     ans = item['answer']
      7     query = "问题:"+ques+"\n答案:"+ans
----> 8     res = classify(query)
      9     class_list.append(res)
     11 return class_list

Cell In[31], line 15, in classify(text)
      2 prompt = PromptTemplate(
      3     input_variables=["prompt"],
      4     template='''我国国民经济行业共有20个门类如下
   (...)
     12     '''
     13 )
     14 chain = LLMChain(llm = llm,prompt=prompt)
---> 15 response = chain.run(
     16     {"prompt":text}
     17 )
     18 print(response)
     20 return response

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:170, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
    168     warned = True
    169     emit_warning()
--> 170 return wrapped(*args, **kwargs)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/base.py:598, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs)
    596     if len(args) != 1:
    597         raise ValueError("`run` supports only one positional argument.")
--> 598     return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
    599         _output_key
    600     ]
    602 if kwargs and not args:
    603     return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
    604         _output_key
    605     ]

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:170, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
    168     warned = True
    169     emit_warning()
--> 170 return wrapped(*args, **kwargs)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/base.py:381, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
    349 """Execute the chain.
    350 
    351 Args:
   (...)
    372         `Chain.output_keys`.
    373 """
    374 config = {
    375     "callbacks": callbacks,
    376     "tags": tags,
    377     "metadata": metadata,
    378     "run_name": run_name,
    379 }
--> 381 return self.invoke(
    382     inputs,
    383     cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
    384     return_only_outputs=return_only_outputs,
    385     include_run_info=include_run_info,
    386 )

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/base.py:164, in Chain.invoke(self, input, config, **kwargs)
    162 except BaseException as e:
    163     run_manager.on_chain_error(e)
--> 164     raise e
    165 run_manager.on_chain_end(outputs)
    167 if include_run_info:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/base.py:154, in Chain.invoke(self, input, config, **kwargs)
    151 try:
    152     self._validate_inputs(inputs)
    153     outputs = (
--> 154         self._call(inputs, run_manager=run_manager)
    155         if new_arg_supported
    156         else self._call(inputs)
    157     )
    159     final_outputs: Dict[str, Any] = self.prep_outputs(
    160         inputs, outputs, return_only_outputs
    161     )
    162 except BaseException as e:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/llm.py:126, in LLMChain._call(self, inputs, run_manager)
    121 def _call(
    122     self,
    123     inputs: Dict[str, Any],
    124     run_manager: Optional[CallbackManagerForChainRun] = None,
    125 ) -> Dict[str, str]:
--> 126     response = self.generate([inputs], run_manager=run_manager)
    127     return self.create_outputs(response)[0]

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain/chains/llm.py:138, in LLMChain.generate(self, input_list, run_manager)
    136 callbacks = run_manager.get_child() if run_manager else None
    137 if isinstance(self.llm, BaseLanguageModel):
--> 138     return self.llm.generate_prompt(
    139         prompts,
    140         stop,
    141         callbacks=callbacks,
    142         **self.llm_kwargs,
    143     )
    144 else:
    145     results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
    146         cast(List, prompts), {"callbacks": callbacks}
    147     )

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/language_models/llms.py:750, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    742 def generate_prompt(
    743     self,
    744     prompts: List[PromptValue],
   (...)
    747     **kwargs: Any,
    748 ) -> LLMResult:
    749     prompt_strings = [p.to_string() for p in prompts]
--> 750     return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/language_models/llms.py:944, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    929 if (self.cache is None and get_llm_cache() is None) or self.cache is False:
    930     run_managers = [
    931         callback_manager.on_llm_start(
    932             dumpd(self),
   (...)
    942         )
    943     ]
--> 944     output = self._generate_helper(
    945         prompts, stop, run_managers, bool(new_arg_supported), **kwargs
    946     )
    947     return output
    948 if len(missing_prompts) > 0:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/language_models/llms.py:787, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    785     for run_manager in run_managers:
    786         run_manager.on_llm_error(e, response=LLMResult(generations=[]))
--> 787     raise e
    788 flattened_outputs = output.flatten()
    789 for manager, flattened_output in zip(run_managers, flattened_outputs):

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/language_models/llms.py:774, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
    764 def _generate_helper(
    765     self,
    766     prompts: List[str],
   (...)
    770     **kwargs: Any,
    771 ) -> LLMResult:
    772     try:
    773         output = (
--> 774             self._generate(
    775                 prompts,
    776                 stop=stop,
    777                 # TODO: support multiple run managers
    778                 run_manager=run_managers[0] if run_managers else None,
    779                 **kwargs,
    780             )
    781             if new_arg_supported
    782             else self._generate(prompts, stop=stop)
    783         )
    784     except BaseException as e:
    785         for run_manager in run_managers:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_core/language_models/llms.py:1508, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
   1505 new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
   1506 for prompt in prompts:
   1507     text = (
-> 1508         self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
   1509         if new_arg_supported
   1510         else self._call(prompt, stop=stop, **kwargs)
   1511     )
   1512     generations.append([Generation(text=text)])
   1513 return LLMResult(generations=generations)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/langchain_community/llms/baidu_qianfan_endpoint.py:266, in QianfanLLMEndpoint._call(self, prompt, stop, run_manager, **kwargs)
    264 params = self._convert_prompt_msg_params(prompt, **kwargs)
    265 params["stop"] = stop
--> 266 response_payload = self.client.do(**params)
    268 return response_payload["result"]

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/llm/completion.py:202, in Completion.do(self, prompt, model, endpoint, stream, retry_count, request_timeout, request_id, backoff_factor, **kwargs)
    199 if request_id is not None:
    200     kwargs["request_id"] = request_id
--> 202 return self._do(
    203     model,
    204     stream,
    205     retry_count,
    206     request_timeout,
    207     backoff_factor,
    208     endpoint=endpoint,
    209     **kwargs,
    210 )

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/llm/base.py:555, in BaseResource._do(self, model, stream, retry_count, request_timeout, backoff_factor, retry_jitter, retry_err_codes, retry_max_wait_interval, **kwargs)
    539 """
    540 qianfan resource basic do
    541 
   (...)
    544 
    545 """
    546 retry_config = self.generate_retry_config(
    547     retry_count=retry_count,
    548     request_timeout=request_timeout,
   (...)
    552     retry_max_wait_interval=retry_max_wait_interval,
    553 )
--> 555 return self._request(model, stream, retry_config, **kwargs)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/llm/base.py:674, in BaseResourceV1._request(self, model, stream, retry_config, show_total_latency, **kwargs)
    672 while True:
    673     try:
--> 674         resp = self._client.llm(
    675             endpoint=endpoint,
    676             header=self._generate_header(model, stream, **kwargs),
    677             query=self._generate_query(model, stream, **kwargs),
    678             body=self._generate_body(model, stream, **kwargs),
    679             stream=stream,
    680             data_postprocess=self._data_postprocess,
    681             retry_config=retry_config,
    682             show_total_latency=show_total_latency,
    683         )
    684     except errors.APIError as e:
    685         if (
    686             e.error_code == APIErrorCode.UnsupportedMethod
    687             and not refreshed_model_list
    688         ):

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/openapi_requestor.py:473, in QfAPIRequestor.llm(self, endpoint, header, query, body, stream, data_postprocess, retry_config, show_total_latency)
    464     else:
    465         return self._compensate_token_usage_non_stream(
    466             self._request(
    467                 req,
   (...)
    470             token_count,
    471         )
--> 473 return self._with_retry(retry_config, _helper)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/base.py:435, in BaseAPIRequestor._with_retry(self, config, func, *args)
    422 @retry(
    423     wait=wait_exponential_jitter(
    424         initial=config.backoff_factor,
   (...)
    431 )
    432 def _retry_wrapper(*args: Any) -> _T:
    433     return func(*args)
--> 435 return _retry_wrapper(*args)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/tenacity/__init__.py:336, in BaseRetrying.wraps.<locals>.wrapped_f(*args, **kw)
    334 copy = self.copy()
    335 wrapped_f.statistics = copy.statistics  # type: ignore[attr-defined]
--> 336 return copy(f, *args, **kw)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/tenacity/__init__.py:475, in Retrying.__call__(self, fn, *args, **kwargs)
    473 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
    474 while True:
--> 475     do = self.iter(retry_state=retry_state)
    476     if isinstance(do, DoAttempt):
    477         try:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/tenacity/__init__.py:376, in BaseRetrying.iter(self, retry_state)
    374 result = None
    375 for action in self.iter_state.actions:
--> 376     result = action(retry_state)
    377 return result

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/tenacity/__init__.py:398, in BaseRetrying._post_retry_check_actions.<locals>.<lambda>(rs)
    396 def _post_retry_check_actions(self, retry_state: "RetryCallState") -> None:
    397     if not (self.iter_state.is_explicit_retry or self.iter_state.retry_run_result):
--> 398         self._add_action_func(lambda rs: rs.outcome.result())
    399         return
    401     if self.after is not None:

File ~/miniconda3/envs/Qwen/lib/python3.10/concurrent/futures/_base.py:451, in Future.result(self, timeout)
    449     raise CancelledError()
    450 elif self._state == FINISHED:
--> 451     return self.__get_result()
    453 self._condition.wait(timeout)
    455 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File ~/miniconda3/envs/Qwen/lib/python3.10/concurrent/futures/_base.py:403, in Future.__get_result(self)
    401 if self._exception:
    402     try:
--> 403         raise self._exception
    404     finally:
    405         # Break a reference cycle with the exception in self._exception
    406         self = None

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/tenacity/__init__.py:478, in Retrying.__call__(self, fn, *args, **kwargs)
    476 if isinstance(do, DoAttempt):
    477     try:
--> 478         result = fn(*args, **kwargs)
    479     except BaseException:  # noqa: B902
    480         retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/base.py:433, in BaseAPIRequestor._with_retry.<locals>._retry_wrapper(*args)
    422 @retry(
    423     wait=wait_exponential_jitter(
    424         initial=config.backoff_factor,
   (...)
    431 )
    432 def _retry_wrapper(*args: Any) -> _T:
--> 433     return func(*args)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/openapi_requestor.py:82, in QfAPIRequestor._retry_if_token_expired.<locals>.retry_wrapper(*args, **kwargs)
     80 if not token_refreshed:
     81     try:
---> 82         return func(*args)
     83     except errors.AccessTokenExpiredError:
     84         # refresh token and set token_refreshed flag
     85         self._auth.refresh_access_token()

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/openapi_requestor.py:466, in QfAPIRequestor.llm.<locals>._helper()
    463         return _list_generator(result_list)
    464 else:
    465     return self._compensate_token_usage_non_stream(
--> 466         self._request(
    467             req,
    468             data_postprocess=data_postprocess,
    469         ),
    470         token_count,
    471     )

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/base.py:179, in _latency.<locals>.wrapper(requestor, request, *args, **kwargs)
    177 start_time = time.perf_counter()
    178 start_timestamp = int(time.time() * 1000)
--> 179 resp = func(requestor, request, *args, **kwargs)
    180 resp.statistic["total_latency"] = time.perf_counter() - start_time
    181 resp.statistic["start_timestamp"] = start_timestamp

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/requestor/base.py:318, in BaseAPIRequestor._request(self, request, data_postprocess)
    314 """
    315 simple sync request
    316 """
    317 request = self._preprocess_request(request)
--> 318 response = self._client.request(request)
    319 _check_if_status_code_is_200(response)
    320 try:

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/qianfan/resources/http_client.py:69, in HTTPClient.request(self, req)
     65 def request(self, req: QfRequest) -> requests.Response:
     66     """
     67     sync request
     68     """
---> 69     resp = self._session.request(
     70         **req.requests_args(),
     71         timeout=req.retry_config.timeout,
     72         verify=self.ssl,
     73         proxies=self._requests_proxy(),
     74     )
     75     return resp

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/requests/sessions.py:589, in Session.request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
    584 send_kwargs = {
    585     "timeout": timeout,
    586     "allow_redirects": allow_redirects,
    587 }
    588 send_kwargs.update(settings)
--> 589 resp = self.send(prep, **send_kwargs)
    591 return resp

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/requests/sessions.py:703, in Session.send(self, request, **kwargs)
    700 start = preferred_clock()
    702 # Send the request
--> 703 r = adapter.send(request, **kwargs)
    705 # Total elapsed time of the request (approximately)
    706 elapsed = preferred_clock() - start

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/requests/adapters.py:667, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
    664     timeout = TimeoutSauce(connect=timeout, read=timeout)
    666 try:
--> 667     resp = conn.urlopen(
    668         method=request.method,
    669         url=url,
    670         body=request.body,
    671         headers=request.headers,
    672         redirect=False,
    673         assert_same_host=False,
    674         preload_content=False,
    675         decode_content=False,
    676         retries=self.max_retries,
    677         timeout=timeout,
    678         chunked=chunked,
    679     )
    681 except (ProtocolError, OSError) as err:
    682     raise ConnectionError(err, request=request)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/urllib3/connectionpool.py:789, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)
    786 response_conn = conn if not release_conn else None
    788 # Make the request on the HTTPConnection object
--> 789 response = self._make_request(
    790     conn,
    791     method,
    792     url,
    793     timeout=timeout_obj,
    794     body=body,
    795     headers=headers,
    796     chunked=chunked,
    797     retries=retries,
    798     response_conn=response_conn,
    799     preload_content=preload_content,
    800     decode_content=decode_content,
    801     **response_kw,
    802 )
    804 # Everything went great!
    805 clean_exit = True

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/urllib3/connectionpool.py:536, in HTTPConnectionPool._make_request(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)
    534 # Receive the response from the server
    535 try:
--> 536     response = conn.getresponse()
    537 except (BaseSSLError, OSError) as e:
    538     self._raise_timeout(err=e, url=url, timeout_value=read_timeout)

File ~/miniconda3/envs/Qwen/lib/python3.10/site-packages/urllib3/connection.py:464, in HTTPConnection.getresponse(self)
    461 from .response import HTTPResponse
    463 # Get the response from http.client.HTTPConnection
--> 464 httplib_response = super().getresponse()
    466 try:
    467     assert_header_parsing(httplib_response.msg)

File ~/miniconda3/envs/Qwen/lib/python3.10/http/client.py:1375, in HTTPConnection.getresponse(self)
   1373 try:
   1374     try:
-> 1375         response.begin()
   1376     except ConnectionError:
   1377         self.close()

File ~/miniconda3/envs/Qwen/lib/python3.10/http/client.py:318, in HTTPResponse.begin(self)
    316 # read until we get a non-100 response
    317 while True:
--> 318     version, status, reason = self._read_status()
    319     if status != CONTINUE:
    320         break

File ~/miniconda3/envs/Qwen/lib/python3.10/http/client.py:279, in HTTPResponse._read_status(self)
    278 def _read_status(self):
--> 279     line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
    280     if len(line) > _MAXLINE:
    281         raise LineTooLong("status line")

File ~/miniconda3/envs/Qwen/lib/python3.10/socket.py:705, in SocketIO.readinto(self, b)
    703 while True:
    704     try:
--> 705         return self._sock.recv_into(b)
    706     except timeout:
    707         self._timeout_occurred = True

File ~/miniconda3/envs/Qwen/lib/python3.10/ssl.py:1307, in SSLSocket.recv_into(self, buffer, nbytes, flags)
   1303     if flags != 0:
   1304         raise ValueError(
   1305           "non-zero flags not allowed in calls to recv_into() on %s" %
   1306           self.__class__)
-> 1307     return self.read(nbytes, buffer)
   1308 else:
   1309     return super().recv_into(buffer, nbytes, flags)

File ~/miniconda3/envs/Qwen/lib/python3.10/ssl.py:1163, in SSLSocket.read(self, len, buffer)
   1161 try:
   1162     if buffer is not None:
-> 1163         return self._sslobj.read(len, buffer)
   1164     else:
   1165         return self._sslobj.read(len)

KeyboardInterrupt: 
In [49]:
data = pd.read_excel("/home/zhangxj/WorkFile/LCA-GPT/QA/QA.xlsx")
data.head()
Out[49]:
question answer
0 什么是生命周期分析LCA的主要目标 生命周期分析旨在评估产品或服务从原材料获取到最终处置的环境影响。
1 在LCA中如何确定研究的范围 研究范围包括定义系统边界,如输入、输出、功能单位和分析阶段。
2 文档中提到的医疗废物如何处理? 文档未直接说明医疗废物的具体处理方法,只提及了与之相关的能源消耗。
3 LCA数据清单收集阶段需要哪些信息 数据清单需收集所有过程的输入输出数据,包括资源消耗、排放和能源使用。
4 生命周期影响评价阶段的目标是什么? 该阶段旨在量化每个阶段对环境的各种影响,如气候变化、水耗和土地使用。
In [ ]:
class_list = []
for idx,item in data.iterrows():
        ques = item['question']
        ans = item['answer']
        query = "问题:"+ques+"\n答案:"+ans
        res = classify(query)
        res = normalize(res)
        class_list.append(res)
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
建筑业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
科学研究和技术服务业
科学研究和技术服务业
水利、环境和公共设施管理业
科学研究和技术服务业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
电力、热力、燃气及水生产和供应业
科学研究和技术服务业
科学研究和技术服务业
公共管理、社会保障和社会组织
建筑业
建筑业
建筑业
制造业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
制造业
水利、环境和公共设施管理业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
制造业
科学研究和技术服务业
科学研究和技术服务业
制造业
制造业
制造业
制造业
科学研究和技术服务业
电力、热力、燃气及水生产和供应业
科学研究和技术服务业
制造业;建筑业;批发和零售业;住宿和餐饮业
制造业;交通运输、仓储和邮政业;批发和零售业;住宿和餐饮业
电力、热力、燃气及水生产和供应业
制造业
公共管理、社会保障和社会组织
采矿业
制造业
采矿业
采矿业
科学研究和技术服务业
科学研究和技术服务业
采矿业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
制造业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
公共管理、社会保障和社会组织
科学研究和技术服务业
水利、环境和公共设施管理业
水利、环境和公共设施管理业
科学研究和技术服务业
农、林、牧、渔业
科学研究和技术服务业
水利、环境和公共设施管理业
制造业
水利、环境和公共设施管理业
科学研究和技术服务业
水利、环境和公共设施管理业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
科学研究和技术服务业
制造业
制造业
制造业
制造业
水利、环境和公共设施管理业
制造业
In [ ]: